diff --git a/.coverage b/.coverage deleted file mode 100644 index 96a55d2f..00000000 Binary files a/.coverage and /dev/null differ diff --git a/README.md b/README.md index 936f2bfa..c368588d 100644 --- a/README.md +++ b/README.md @@ -7,11 +7,11 @@ A powerful, feature-rich command-line interface for interacting with Model Conte **Default Configuration**: MCP CLI defaults to using Ollama with the `gpt-oss` reasoning model for local, privacy-focused operation without requiring API keys. -## 🆕 Recent Updates (v0.11) +## 🆕 Recent Updates (v0.11.1) ### Architecture & Performance -- **Updated to chuk-llm v0.14**: Dynamic model discovery with capability-based selection, llama.cpp integration (1.53x faster), 52x faster imports -- **Updated to chuk-tool-processor v0.11.2**: Production-grade middleware with timeouts, retries, circuit breakers, and comprehensive observability +- **Updated to chuk-llm v0.16+**: Dynamic model discovery with capability-based selection, llama.cpp integration (1.53x faster), 52x faster imports +- **Updated to chuk-tool-processor v0.13+**: Now using CTP's production-grade middleware (retry, circuit breaker, rate limiting) - **Slimmed ToolManager**: Reduced from 2000+ lines to ~800 lines by delegating to StreamManager while keeping OAuth, filtering, and LLM adaptation ### Reliability Improvements @@ -28,7 +28,7 @@ A powerful, feature-rich command-line interface for interacting with Model Conte The MCP CLI is built on a modular architecture with clean separation of concerns: -- **[CHUK Tool Processor](https://github.com/chrishayuk/chuk-tool-processor)**: Production-grade async tool execution with middleware (timeouts, retries, circuit breakers), multiple execution strategies (in-process, subprocess, remote MCP), and comprehensive observability +- **[CHUK Tool Processor](https://github.com/chrishayuk/chuk-tool-processor)**: Production-grade async tool execution with middleware (retry, circuit breaker, rate limiting), multiple execution strategies, and 
observability - **[CHUK-LLM](https://github.com/chrishayuk/chuk-llm)**: Unified LLM provider with dynamic model discovery, capability-based selection, and llama.cpp integration (1.53x faster than Ollama with automatic model reuse) - **[CHUK-Term](https://github.com/chrishayuk/chuk-term)**: Enhanced terminal UI with themes, cross-platform terminal management, and rich formatting - **MCP CLI**: Command orchestration and integration layer (this project) @@ -43,7 +43,7 @@ The MCP CLI is built on a modular architecture with clean separation of concerns ### Advanced Chat Interface - **Streaming Responses**: Real-time response generation with live UI updates -- **Reasoning Visibility**: See AI's thinking process with reasoning models (gpt-oss, GPT-5, Claude 4) +- **Reasoning Visibility**: See AI's thinking process with reasoning models (gpt-oss, GPT-5, Claude 4.5) - **Concurrent Tool Execution**: Execute multiple tools simultaneously while preserving conversation order - **Smart Interruption**: Interrupt streaming responses or tool execution with Ctrl+C - **Performance Metrics**: Response timing, words/second, and execution statistics @@ -57,7 +57,7 @@ MCP CLI supports all providers and models from CHUK-LLM, including cutting-edge |----------|------------|------------------| | **Ollama** (Default) | 🧠 gpt-oss, llama3.3, llama3.2, qwen3, qwen2.5-coder, deepseek-coder, granite3.3, mistral, gemma3, phi3, codellama | Local reasoning models, privacy-focused, no API key required | | **OpenAI** | 🚀 GPT-5 family (gpt-5, gpt-5-mini, gpt-5-nano), GPT-4o family, O3 series (o3, o3-mini) | Advanced reasoning, function calling, vision | -| **Anthropic** | 🧠 Claude 4 family (claude-4-1-opus, claude-4-sonnet), Claude 3.5 Sonnet | Enhanced reasoning, long context | +| **Anthropic** | 🧠 Claude 4.5 family (claude-4-5-opus, claude-4-5-sonnet), Claude 3.5 Sonnet | Enhanced reasoning, long context | | **Azure OpenAI** 🏢 | Enterprise GPT-5, GPT-4 models | Private endpoints, compliance, audit 
logs | | **Google Gemini** | Gemini 2.0 Flash, Gemini 1.5 Pro | Multimodal, fast inference | | **Groq** ⚡ | Llama 3.1 models, Mixtral | Ultra-fast inference (500+ tokens/sec) | @@ -65,7 +65,7 @@ MCP CLI supports all providers and models from CHUK-LLM, including cutting-edge | **IBM watsonx** 🏢 | Granite, Llama models | Enterprise compliance | | **Mistral AI** 🇪🇺 | Mistral Large, Medium | European, efficient models | -### Robust Tool System (Powered by CHUK Tool Processor v0.11+) +### Robust Tool System (Powered by CHUK Tool Processor v0.13+) - **Automatic Discovery**: Server-provided tools are automatically detected and catalogued - **Provider Adaptation**: Tool names are automatically sanitized for provider compatibility - **Production-Grade Execution**: Middleware layers with timeouts, retries, exponential backoff, caching, and circuit breakers @@ -73,7 +73,7 @@ MCP CLI supports all providers and models from CHUK-LLM, including cutting-edge - **Concurrent Execution**: Multiple tools can run simultaneously with proper coordination - **Rich Progress Display**: Real-time progress indicators and execution timing - **Tool History**: Complete audit trail of all tool executions -- **Observability**: Built-in OpenTelemetry tracing and Prometheus metrics for production monitoring +- **Middleware**: Retry with exponential backoff, circuit breakers, and rate limiting via CTP - **Streaming Tool Calls**: Support for tools that return streaming data ### Advanced Configuration Management @@ -121,7 +121,7 @@ Comprehensive documentation is available in the `docs/` directory: - Pull the default reasoning model: `ollama pull gpt-oss` - **For Cloud Providers** (Optional): - OpenAI: `OPENAI_API_KEY` environment variable (for GPT-5, GPT-4, O3 models) - - Anthropic: `ANTHROPIC_API_KEY` environment variable (for Claude 4, Claude 3.5) + - Anthropic: `ANTHROPIC_API_KEY` environment variable (for Claude 4.5, Claude 3.5) - Azure: `AZURE_OPENAI_API_KEY` and `AZURE_OPENAI_ENDPOINT` (for 
enterprise GPT-5) - Google: `GEMINI_API_KEY` (for Gemini models) - Groq: `GROQ_API_KEY` (for fast Llama models) @@ -186,9 +186,9 @@ mcp-cli --provider openai --model gpt-4o-mini # Smaller GPT-4 mcp-cli --provider openai --model o3 # O3 reasoning mcp-cli --provider openai --model o3-mini # Efficient O3 -# Claude 4 Family (requires Anthropic API key) -mcp-cli --provider anthropic --model claude-4-1-opus # Most advanced Claude -mcp-cli --provider anthropic --model claude-4-sonnet # Balanced Claude 4 +# Claude 4.5 Family (requires Anthropic API key) +mcp-cli --provider anthropic --model claude-4-5-opus # Most advanced Claude +mcp-cli --provider anthropic --model claude-4-5-sonnet # Balanced Claude 4.5 mcp-cli --provider anthropic --model claude-3-5-sonnet # Claude 3.5 # Enterprise Azure (requires Azure configuration) @@ -231,7 +231,7 @@ export LLM_MODEL=gpt-oss # Default model (already the default) # For cloud providers (optional) export OPENAI_API_KEY=sk-... # For GPT-5, GPT-4, O3 models -export ANTHROPIC_API_KEY=sk-ant-... # For Claude 4, Claude 3.5 +export ANTHROPIC_API_KEY=sk-ant-... # For Claude 4.5, Claude 3.5 export AZURE_OPENAI_API_KEY=sk-... # For enterprise GPT-5 export AZURE_OPENAI_ENDPOINT=https://... export GEMINI_API_KEY=... 
# For Gemini models @@ -254,7 +254,7 @@ mcp-cli --server sqlite # See the AI's thinking process with reasoning models mcp-cli --server sqlite --model gpt-oss # Open-source reasoning mcp-cli --server sqlite --provider openai --model gpt-5 # GPT-5 reasoning -mcp-cli --server sqlite --provider anthropic --model claude-4-1-opus # Claude 4 reasoning +mcp-cli --server sqlite --provider anthropic --model claude-4-5-opus # Claude 4.5 reasoning # Use different local models mcp-cli --server sqlite --model llama3.3 @@ -262,7 +262,7 @@ mcp-cli --server sqlite --model qwen2.5-coder # Switch to cloud providers (requires API keys) mcp-cli chat --server sqlite --provider openai --model gpt-5 -mcp-cli chat --server sqlite --provider anthropic --model claude-4-sonnet +mcp-cli chat --server sqlite --provider anthropic --model claude-4-5-sonnet ``` ### 2. Interactive Mode @@ -311,7 +311,7 @@ mcp-cli models # Show models for specific provider mcp-cli models openai # Shows GPT-5, GPT-4, O3 models -mcp-cli models anthropic # Shows Claude 4, Claude 3.5 models +mcp-cli models anthropic # Shows Claude 4.5, Claude 3.5 models mcp-cli models ollama # Shows gpt-oss, llama3.3, etc. 
# Ping servers @@ -346,7 +346,7 @@ mcp-cli --server sqlite,filesystem # With advanced reasoning models mcp-cli --server sqlite --provider openai --model gpt-5 -mcp-cli --server sqlite --provider anthropic --model claude-4-1-opus +mcp-cli --server sqlite --provider anthropic --model claude-4-5-opus ``` ### Chat Commands (Slash Commands) @@ -370,7 +370,7 @@ mcp-cli --server sqlite --provider anthropic --model claude-4-1-opus /model # Show current model (default: gpt-oss) /model llama3.3 # Switch to different Ollama model /model gpt-5 # Switch to GPT-5 (if using OpenAI) -/model claude-4-1-opus # Switch to Claude 4 (if using Anthropic) +/model claude-4-5-opus # Switch to Claude 4.5 (if using Anthropic) /models # List available models for current provider ``` @@ -535,7 +535,7 @@ provider openai gpt-5 # Switch to GPT-5 # Model management model # Show current model model gpt-oss # Switch to reasoning model -model claude-4-1-opus # Switch to Claude 4 +model claude-4-5-opus # Switch to Claude 4.5 models # List available models # Tool operations @@ -591,7 +591,7 @@ ls *.txt | parallel mcp-cli cmd --server sqlite --input {} --output {}.summary - ### Ollama Configuration (Default) -Ollama runs locally by default on `http://localhost:11434`. MCP CLI v0.11+ with CHUK-LLM v0.14 includes **llama.cpp integration** that automatically discovers and reuses Ollama's downloaded models for 1.53x faster inference (311 vs 204 tokens/sec) without re-downloading. +Ollama runs locally by default on `http://localhost:11434`. MCP CLI v0.11.1+ with CHUK-LLM v0.16+ includes **llama.cpp integration** that automatically discovers and reuses Ollama's downloaded models for 1.53x faster inference (311 vs 204 tokens/sec) without re-downloading. 
To use reasoning and other models: @@ -624,7 +624,7 @@ To use cloud providers with advanced models, configure API keys: # Configure OpenAI (for GPT-5, GPT-4, O3 models) mcp-cli provider set openai api_key sk-your-key-here -# Configure Anthropic (for Claude 4, Claude 3.5) +# Configure Anthropic (for Claude 4.5, Claude 3.5) mcp-cli provider set anthropic api_key sk-ant-your-key-here # Configure Azure OpenAI (for enterprise GPT-5) @@ -684,7 +684,7 @@ openai: anthropic: api_base: https://api.anthropic.com - default_model: claude-4-1-opus + default_model: claude-4-5-opus azure_openai: api_base: https://your-resource.openai.azure.com @@ -747,7 +747,7 @@ This means you can: ### Bundled Default Servers -MCP CLI v0.11+ comes with an expanded set of pre-configured servers in the bundled `server_config.json`: +MCP CLI v0.11.1+ comes with an expanded set of pre-configured servers in the bundled `server_config.json`: | Server | Type | Description | Configuration | |--------|------|-------------|---------------| @@ -929,9 +929,9 @@ mcp-cli [See GPT-5's reasoning approach] > /provider anthropic -> /model claude-4-1-opus +> /model claude-4-5-opus > Think through this problem step by step: If a train leaves New York at 3 PM... -[See Claude 4's analytical process] +[See Claude 4.5's analytical process] ``` ### Local-First Workflow with Reasoning @@ -957,7 +957,7 @@ mcp-cli chat --server sqlite > Complex enterprise architecture design... > /provider anthropic -> /model claude-4-1-opus +> /model claude-4-5-opus > Detailed strategic analysis... > /provider ollama @@ -978,13 +978,13 @@ mcp-cli chat --server sqlite > /provider openai gpt-5 # Requires API key > What's the best way to optimize this SQL query? -> /provider anthropic claude-4-sonnet # Requires API key +> /provider anthropic claude-4-5-sonnet # Requires API key > What's the best way to optimize this SQL query? 
# Use each provider's strengths > /provider ollama gpt-oss # Local reasoning, privacy > /provider openai gpt-5 # Advanced reasoning -> /provider anthropic claude-4-1-opus # Deep analysis +> /provider anthropic claude-4-5-opus # Deep analysis > /provider groq llama-3.1-70b # Ultra-fast responses ``` @@ -1041,7 +1041,7 @@ Provider Diagnostics Provider | Status | Response Time | Features | Models ollama | ✅ Ready | 56ms | 📡🔧 | gpt-oss, llama3.3, qwen3, ... openai | ✅ Ready | 234ms | 📡🔧👁️ | gpt-5, gpt-4o, o3, ... -anthropic | ✅ Ready | 187ms | 📡🔧 | claude-4-1-opus, claude-4-sonnet, ... +anthropic | ✅ Ready | 187ms | 📡🔧 | claude-4-5-opus, claude-4-5-sonnet, ... azure_openai | ✅ Ready | 198ms | 📡🔧👁️ | gpt-5, gpt-4o, ... gemini | ✅ Ready | 156ms | 📡🔧👁️ | gemini-2.0-flash, ... groq | ✅ Ready | 45ms | 📡🔧 | llama-3.1-70b, ... @@ -1091,7 +1091,7 @@ granite3.3 | Available # For cloud providers, check supported models mcp-cli models openai # Shows GPT-5, GPT-4, O3 models - mcp-cli models anthropic # Shows Claude 4, Claude 3.5 models + mcp-cli models anthropic # Shows Claude 4.5, Claude 3.5 models ``` 3. 
**Provider not found or API key missing**: @@ -1142,21 +1142,21 @@ mcp-cli --log-level DEBUG interactive --server sqlite ### Execution Security - **Tool Validation**: All tool calls are validated before execution -- **Timeout Protection**: Configurable timeouts prevent hanging operations (v0.11.2+) -- **Circuit Breakers**: Automatic failure detection and recovery to prevent cascading failures (v0.11.2+) +- **Timeout Protection**: Configurable timeouts prevent hanging operations (v0.13+) +- **Circuit Breakers**: Automatic failure detection and recovery to prevent cascading failures (v0.13+) - **Server Isolation**: Each server runs in its own process - **File Access**: Filesystem access can be disabled with `--disable-filesystem` - **Transport Monitoring**: Automatic detection of connection failures with warnings (v0.11+) ## 🚀 Performance Features -### LLM Provider Performance (v0.14) +### LLM Provider Performance (v0.16+) - **52x Faster Imports**: Reduced from 735ms to 14ms through lazy loading - **112x Faster Client Creation**: Automatic thread-safe caching - **llama.cpp Integration**: 1.53x faster inference (311 vs 204 tokens/sec) with automatic Ollama model reuse - **Dynamic Model Discovery**: Zero overhead capability-based model selection -### Tool Execution Performance (v0.11.2) +### Tool Execution Performance (v0.13+) - **Production Middleware**: Timeouts, retries with exponential backoff, circuit breakers, and result caching - **Concurrent Tool Execution**: Multiple tools can run simultaneously with proper coordination - **Connection Health Monitoring**: Automatic detection and recovery from transport failures @@ -1176,8 +1176,8 @@ Core dependencies are organized into feature groups: - **cli**: Terminal UI and command framework (Rich, Typer, chuk-term) - **dev**: Development tools, testing utilities, linting -- **chuk-tool-processor v0.11.2+**: Production-grade tool execution with middleware, multiple execution strategies, and observability (OpenTelemetry, 
Prometheus) -- **chuk-llm v0.14+**: Unified LLM provider with dynamic model discovery, capability-based selection, and llama.cpp integration for 52x faster imports and 112x faster client creation +- **chuk-tool-processor v0.13+**: Production-grade tool execution with middleware, multiple execution strategies, and observability +- **chuk-llm v0.16+**: Unified LLM provider with dynamic model discovery, capability-based selection, and llama.cpp integration for 52x faster imports and 112x faster client creation - **chuk-term**: Enhanced terminal UI with themes, prompts, and cross-platform support Install with specific features: @@ -1249,8 +1249,8 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file ## 🙏 Acknowledgments -- **[CHUK Tool Processor](https://github.com/chrishayuk/chuk-tool-processor)** - Production-grade async tool execution with middleware and observability (v0.11.2+) -- **[CHUK-LLM](https://github.com/chrishayuk/chuk-llm)** - Unified LLM provider with dynamic model discovery, llama.cpp integration, and GPT-5/Claude 4 support (v0.14+) +- **[CHUK Tool Processor](https://github.com/chrishayuk/chuk-tool-processor)** - Production-grade async tool execution with middleware and observability +- **[CHUK-LLM](https://github.com/chrishayuk/chuk-llm)** - Unified LLM provider with dynamic model discovery, llama.cpp integration, and GPT-5/Claude 4.5 support (v0.16+) - **[CHUK-Term](https://github.com/chrishayuk/chuk-term)** - Enhanced terminal UI with themes and cross-platform support - **[Rich](https://github.com/Textualize/rich)** - Beautiful terminal formatting - **[Typer](https://typer.tiangolo.com/)** - CLI framework @@ -1261,5 +1261,5 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file - **[Model Context Protocol](https://modelcontextprotocol.io/)** - Core protocol specification - **[MCP Servers](https://github.com/modelcontextprotocol/servers)** - Official MCP server implementations - **[CHUK Tool 
Processor](https://github.com/chrishayuk/chuk-tool-processor)** - Production-grade tool execution with middleware and observability -- **[CHUK-LLM](https://github.com/chrishayuk/chuk-llm)** - LLM provider abstraction with dynamic model discovery, GPT-5, Claude 4, O3 series support, and llama.cpp integration +- **[CHUK-LLM](https://github.com/chrishayuk/chuk-llm)** - LLM provider abstraction with dynamic model discovery, GPT-5, Claude 4.5, O3 series support, and llama.cpp integration - **[CHUK-Term](https://github.com/chrishayuk/chuk-term)** - Terminal UI library with themes and cross-platform support \ No newline at end of file diff --git a/examples/basic_llm_call.py b/examples/basic_llm_call.py index 63ffcc47..7a21ac8f 100644 --- a/examples/basic_llm_call.py +++ b/examples/basic_llm_call.py @@ -21,7 +21,7 @@ # imports from chuk_llm.llm.client import get_client -from mcp_cli.llm.system_prompt_generator import SystemPromptGenerator +from mcp_cli.chat.system_prompt import generate_system_prompt # load environment variables load_dotenv() @@ -35,8 +35,8 @@ async def run_llm_diagnostic(provider: str, model: str, prompt: str) -> None: # get the client client = get_client(provider=provider, model=model) - # get the system prompt - system_prompt = SystemPromptGenerator().generate_prompt({}) + # get the system prompt (tools are passed via API, not embedded in prompt) + system_prompt = generate_system_prompt() messages: List[Dict[str, Any]] = [ {"role": "system", "content": system_prompt}, {"role": "user", "content": prompt}, diff --git a/examples/mcp_round_trip.py b/examples/mcp_round_trip.py index 04c963c8..781ecb4a 100644 --- a/examples/mcp_round_trip.py +++ b/examples/mcp_round_trip.py @@ -26,9 +26,9 @@ from chuk_tool_processor.core.processor import ToolProcessor from chuk_tool_processor.models.tool_result import ToolResult -# MCP CLI imports - only using llm_client and system_prompt_generator +# MCP CLI imports from chuk_llm.llm.client import get_client -from 
mcp_cli.llm.system_prompt_generator import SystemPromptGenerator +from mcp_cli.chat.system_prompt import generate_system_prompt # Initialize colorama for colored output colorama_init(autoreset=True) @@ -231,7 +231,8 @@ async def main() -> None: # 5) Send prompt to LLM client = get_client(provider=args.provider, model=args.model) - sys_prompt = SystemPromptGenerator().generate_prompt({"tools": openai_tools}) + # Tools are passed via API, not embedded in system prompt + sys_prompt = generate_system_prompt(openai_tools) messages = [ {"role": "system", "content": sys_prompt}, {"role": "user", "content": args.prompt}, diff --git a/examples/mcp_round_trip_with_toolmanager.py b/examples/mcp_round_trip_with_toolmanager.py index f1613cfa..8d51452f 100644 --- a/examples/mcp_round_trip_with_toolmanager.py +++ b/examples/mcp_round_trip_with_toolmanager.py @@ -23,7 +23,7 @@ # ── MCP & LLM helpers ─────────────────────────────────────────────────── from chuk_llm.llm.client import get_client from mcp_cli.tools.manager import ToolManager -from mcp_cli.llm.system_prompt_generator import SystemPromptGenerator +from mcp_cli.chat.system_prompt import generate_system_prompt colorama_init(autoreset=True) @@ -95,7 +95,8 @@ async def main() -> None: # 3️⃣ Initial LLM call (allow tool usage) client = get_client(provider=args.provider, model=args.model) - sys_prompt = SystemPromptGenerator().generate_prompt({"tools": llm_tools}) + # Tools are passed via API, not embedded in system prompt + sys_prompt = generate_system_prompt(llm_tools) messages: List[Dict[str, str | None]] = [ {"role": "system", "content": sys_prompt}, {"role": "user", "content": args.prompt}, diff --git a/examples/ollama_llm_call.py b/examples/ollama_llm_call.py index 65c03cf5..54f84ef8 100644 --- a/examples/ollama_llm_call.py +++ b/examples/ollama_llm_call.py @@ -20,7 +20,7 @@ # mcp cli imports from chuk_llm.llm.client import get_client -from mcp_cli.llm.system_prompt_generator import SystemPromptGenerator +from 
mcp_cli.chat.system_prompt import generate_system_prompt async def run_ollama_diagnostic(model: str, prompt: str) -> None: @@ -30,7 +30,7 @@ async def run_ollama_diagnostic(model: str, prompt: str) -> None: except Exception as exc: sys.exit(f"[ERROR] Could not create Ollama client: {exc}") - system_prompt = SystemPromptGenerator().generate_prompt({}) + system_prompt = generate_system_prompt() messages: List[Dict[str, Any]] = [ {"role": "system", "content": system_prompt}, {"role": "user", "content": prompt}, diff --git a/examples/tool_args_flow_demo.py b/examples/tool_args_flow_demo.py new file mode 100644 index 00000000..71fbed36 --- /dev/null +++ b/examples/tool_args_flow_demo.py @@ -0,0 +1,375 @@ +#!/usr/bin/env python3 +"""Demo script to trace tool argument flow. + +This shows how arguments flow from model response to tool execution, +helping identify where values might get corrupted or changed. +""" + +import json +import logging +import sys + +sys.path.insert(0, "src") + +from mcp_cli.chat.streaming_handler import ToolCallAccumulator # noqa: E402 + +# Enable debug logging to see all traces +logging.basicConfig(level=logging.DEBUG, format="%(name)s - %(message)s") + + +def demo_accumulator(): + """Demo the tool call accumulator to see how args are merged.""" + print("=" * 60) + print("TOOL CALL ACCUMULATOR DEMO - Normal streaming") + print("=" * 60) + + acc = ToolCallAccumulator() + + # Simulate streaming chunks from model + # Chunk 1: Tool name and start of args + chunk1 = [ + { + "id": "call_123", + "index": 0, + "function": { + "name": "call_tool", + "arguments": '{"tool_name": "normal_cdf"', + }, + } + ] + + # Chunk 2: More args + chunk2 = [ + { + "id": "call_123", + "index": 0, + "function": {"name": "", "arguments": ', "x": 11.03}'}, + } + ] + + print("\n1. CHUNK 1:") + print(f" {json.dumps(chunk1, indent=2)}") + acc.process_chunk_tool_calls(chunk1) + + print("\n2. 
CHUNK 2:") + print(f" {json.dumps(chunk2, indent=2)}") + acc.process_chunk_tool_calls(chunk2) + + print("\n3. ACCUMULATED RESULT:") + result = acc.finalize() + for tc in result: + print(f" Tool: {tc['function']['name']}") + print(f" Args: {tc['function']['arguments']}") + # Parse to show final values + try: + parsed = json.loads(tc["function"]["arguments"]) + print(f" Parsed: {json.dumps(parsed, indent=6)}") + except json.JSONDecodeError as e: + print(f" Parse error: {e}") + + +def demo_complete_then_partial(): + """Demo what happens when model sends complete JSON then partial update.""" + print("\n" + "=" * 60) + print("COMPLETE THEN PARTIAL DEMO - Same ID scenario") + print("=" * 60) + print("Same call_id means chunks should merge (preserving first values)") + + acc = ToolCallAccumulator() + + # Same ID - these should merge + chunk1 = [ + { + "id": "call_456", + "index": 0, + "function": { + "name": "call_tool", + "arguments": '{"tool_name": "normal_cdf", "x": 9.067}', + }, + } + ] + + chunk2 = [ + { + "id": "call_456", # SAME ID - will merge + "index": 0, + "function": { + "name": "", + "arguments": '{"x": 4.617}', # Different x value - should be ignored + }, + } + ] + + print("\n1. CHUNK 1 (id=call_456, x=9.067):") + acc.process_chunk_tool_calls(chunk1) + + print("\n2. CHUNK 2 (id=call_456, x=4.617) - same ID, should merge:") + acc.process_chunk_tool_calls(chunk2) + + print("\n3. 
RESULT (should have x=9.067 - first value preserved):") + result = acc.finalize() + for tc in result: + parsed = json.loads(tc["function"]["arguments"]) + print(f" x = {parsed.get('x')}") + if parsed.get("x") == 9.067: + print(" ✓ CORRECT: First value preserved") + else: + print(" ✗ WRONG: Expected 9.067") + + +def demo_different_ids(): + """Demo that different IDs create separate tool calls.""" + print("\n" + "=" * 60) + print("DIFFERENT IDs DEMO - Should NOT merge") + print("=" * 60) + print("Different call_ids mean separate tool calls") + + acc = ToolCallAccumulator() + + # Different IDs - should NOT merge (this was the bug!) + chunk1 = [ + { + "id": "call_AAA", + "index": 0, + "function": { + "name": "call_tool", + "arguments": '{"tool_name": "normal_cdf", "x": 9.067}', + }, + } + ] + + chunk2 = [ + { + "id": "call_BBB", # DIFFERENT ID - should NOT merge! + "index": 0, # Same index - old bug would merge these! + "function": { + "name": "call_tool", + "arguments": '{"tool_name": "normal_cdf", "x": 4.617}', + }, + } + ] + + print("\n1. CHUNK 1 (id=call_AAA, x=9.067):") + acc.process_chunk_tool_calls(chunk1) + + print("\n2. CHUNK 2 (id=call_BBB, x=4.617) - DIFFERENT ID:") + acc.process_chunk_tool_calls(chunk2) + + print("\n3. 
RESULT (should be TWO separate tool calls):") + result = acc.finalize() + print(f" Number of tool calls: {len(result)}") + for i, tc in enumerate(result): + parsed = json.loads(tc["function"]["arguments"]) + print(f" Tool call {i + 1}: id={tc['id']}, x={parsed.get('x')}") + + if len(result) == 2: + print(" ✓ CORRECT: Two separate tool calls") + else: + print(" ✗ WRONG: Should have 2 tool calls, got", len(result)) + + +def demo_json_merge(): + """Demo JSON string merging behavior.""" + print("\n" + "=" * 60) + print("JSON MERGE BEHAVIOR DEMO") + print("=" * 60) + + acc = ToolCallAccumulator() + + test_cases = [ + # Case 1: Simple concatenation + ('{"x": 11', ".03}", "Simple concat"), + # Case 2: Two complete objects - FIRST should win now + ('{"x": 1}', '{"x": 11.03}', "Two complete objects (1st wins)"), + # Case 3: Overlapping keys - FIRST should win now + ('{"x": 1, "mean": 0}', '{"x": 11.03}', "Overlapping keys (1st wins)"), + # Case 4: First correct value preserved + ('{"x": 11.03}', '{"x": 1}', "First value preserved"), + # Case 5: First has correct value, second corrupted - first wins + ('{"tool_name": "cdf", "x": 11.03}', '{"x": 2.546}', "Correct preserved"), + ] + + for i, (str1, str2, desc) in enumerate(test_cases, 1): + print(f"\n{i}. 
{desc}") + print(f" String 1: {str1}") + print(f" String 2: {str2}") + result = acc._merge_json_strings(str1, str2) + print(f" Merged: {result}") + try: + parsed = json.loads(result) + print(f" Parsed: {parsed}") + except json.JSONDecodeError as e: + print(f" Parse error: {e}") + + +def demo_argument_parsing(): + """Demo how arguments are parsed in tool_processor.""" + print("\n" + "=" * 60) + print("ARGUMENT PARSING DEMO") + print("=" * 60) + + # Simulate different argument formats the model might send + test_args = [ + '{"tool_name": "normal_cdf", "x": 11.03}', + '{"tool_name": "normal_cdf", "x": 11.03, "mean": 0, "std": 1}', + '{"tool_name": "normal_cdf", "x": null}', + '{"tool_name": "normal_cdf"}', # Missing x + "invalid json", + ] + + for i, raw in enumerate(test_args, 1): + print(f"\n{i}. Raw: {raw}") + try: + if isinstance(raw, str): + parsed = json.loads(raw) if raw.strip() else {} + else: + parsed = raw or {} + print(f" Parsed: {parsed}") + + # Filter for display (like tool_processor does) + display = {k: v for k, v in parsed.items() if k != "tool_name"} + print(f" Display args: {display}") + + # Check for None + none_args = [k for k, v in parsed.items() if v is None] + if none_args: + print(f" WARNING: None values for: {none_args}") + + except json.JSONDecodeError as e: + print(f" Parse error: {e}") + + +def demo_precondition_gate(): + """Demo the precondition gate that blocks premature tool calls.""" + print("\n" + "=" * 60) + print("PRECONDITION GATE DEMO") + print("=" * 60) + print("Parameterized tools are blocked when no values exist in state") + + from mcp_cli.chat.tool_state import get_tool_state, reset_tool_state + + # Reset state for clean test + reset_tool_state() + state = get_tool_state() + + print("\n--- Scenario 1: Premature call (no values in state) ---") + # Model tries to call normal_cdf before computing any values + args_premature = {"x": -0.751501502} # Garbage value from GPT-5.2 + allowed, error = 
state.check_tool_preconditions("normal_cdf", args_premature) + print(" Tool: normal_cdf") + print(f" Args: {args_premature}") + print(f" Values in state: {len(state.bindings.bindings)}") + print(f" Allowed: {allowed}") + if error: + print(f" Error: {error[:100]}...") + if not allowed: + print(" ✓ CORRECT: Premature call blocked") + else: + print(" ✗ WRONG: Should have been blocked") + + print("\n--- Scenario 2: After computing a value ---") + # Simulate computing Z and storing it + state.bind_value("sqrt", {"x": 666}, 25.807) # sqrt(666) for sigma_LT + state.bind_value("divide", {"a": 234, "b": 25.807}, 9.067) # Z = 234/25.807 + + print(f" Computed values in state: {len(state.bindings.bindings)}") + for vid, binding in state.bindings.bindings.items(): + print(f" ${vid} = {binding.typed_value} (from {binding.tool_name})") + + # Now try calling normal_cdf with the computed Z + args_valid = {"x": 9.067} + allowed, error = state.check_tool_preconditions("normal_cdf", args_valid) + print("\n Tool: normal_cdf") + print(f" Args: {args_valid}") + print(f" Allowed: {allowed}") + if allowed: + print(" ✓ CORRECT: Call allowed after values computed") + else: + print(f" ✗ WRONG: Should have been allowed. 
Error: {error}") + + print("\n--- Scenario 3: Discovery tools are NOT gated ---") + reset_tool_state() + state = get_tool_state() + + # Discovery tools like search_tools should always be allowed + args_search = {"query": "cdf"} + allowed, error = state.check_tool_preconditions("search_tools", args_search) + print(" Tool: search_tools") + print(f" Args: {args_search}") + print(f" Allowed: {allowed}") + if allowed: + print(" ✓ CORRECT: Discovery tool not gated") + else: + print(" ✗ WRONG: Discovery tools should not be gated") + + +def demo_model_sends_garbage(): + """Demo proving the model sends garbage x values.""" + print("\n" + "=" * 60) + print("MODEL GARBAGE DETECTION DEMO") + print("=" * 60) + print("Shows how we detect when model sends wrong x values") + + from mcp_cli.chat.tool_state import reset_tool_state, get_tool_state + + reset_tool_state() + state = get_tool_state() + + # These are actual values GPT-5.2 sent (from debug logs) + garbage_calls = [ + { + "x": -0.751501502, + "expected_z": 9.067, + "context": "First prompt, Poisson assumption", + }, + {"x": 1.051146509, "expected_z": 11.03, "context": "Second prompt, σ=5"}, + {"x": 3.177643716, "expected_z": 9.067, "context": "Another run"}, + ] + + print("\nGPT-5.2 sent these x values to normal_cdf:") + for i, call in enumerate(garbage_calls, 1): + print(f"\n{i}. {call['context']}") + print(f" Model computed Z ≈ {call['expected_z']} in text") + print(f" Model sent x = {call['x']} to tool") + print(f" Difference: {abs(call['x'] - call['expected_z']):.2f}") + + # Check if precondition would block it + allowed, error = state.check_tool_preconditions("normal_cdf", {"x": call["x"]}) + if not allowed: + print(" ✓ BLOCKED by precondition gate") + else: + print(" Would be allowed (values exist in state)") + + print("\n" + "-" * 60) + print("CONCLUSION: The model computes Z correctly in text but sends") + print("garbage values in the tool call. 
The precondition gate blocks") + print("these premature calls until actual values are computed.") + + +def main(): + print("TOOL ARGUMENT FLOW INVESTIGATION") + print("=" * 60) + print("This demo helps identify where tool arguments might") + print("get corrupted between model output and execution.") + print("=" * 60) + + demo_accumulator() + demo_complete_then_partial() + demo_different_ids() + demo_json_merge() + demo_argument_parsing() + demo_precondition_gate() + demo_model_sends_garbage() + + print("\n" + "=" * 60) + print("INVESTIGATION COMPLETE") + print("=" * 60) + print("\nKey findings:") + print("1. JSON merge preserves first values (fixed)") + print("2. Different call IDs create separate tool calls (fixed)") + print("3. Model sends garbage x values before computing Z") + print("4. Precondition gate blocks premature parameterized tool calls") + + +if __name__ == "__main__": + main() diff --git a/examples/tool_round_trip.py b/examples/tool_round_trip.py index bc926843..8276242a 100644 --- a/examples/tool_round_trip.py +++ b/examples/tool_round_trip.py @@ -36,7 +36,7 @@ # LLM helpers from chuk_llm.llm.client import get_client -from mcp_cli.llm.system_prompt_generator import SystemPromptGenerator +from mcp_cli.chat.system_prompt import generate_system_prompt load_dotenv() @@ -94,7 +94,8 @@ async def round_trip( client = get_client(provider=provider, model=model) - system_prompt = SystemPromptGenerator().generate_prompt({"tools": tools_schema}) + # Tools are passed via the API's tools= parameter, not embedded in system prompt + system_prompt = generate_system_prompt(tools_schema) messages: List[Dict[str, Any]] = [ {"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}, diff --git a/pyproject.toml b/pyproject.toml index 7d8a4a6f..8ce92610 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "mcp-cli" -version = "0.11" +version = "0.12" description = "A cli for 
the Model Context Provider" requires-python = ">=3.11" readme = "README.md" @@ -15,10 +15,11 @@ keywords = ["llm", "openai", "claude", "mcp", "cli"] license = {text = "MIT"} dependencies = [ "asyncio>=3.4.3", - "chuk-llm>=0.14.3", + "chuk-ai-session-manager>=0.8", + "chuk-llm>=0.17.1", "chuk-mcp-client-oauth>=0.3.5", - "chuk-term>=0.1.3", - "chuk-tool-processor>=0.11.2", + "chuk-term>=0.3", + "chuk-tool-processor>=0.18", "cryptography>=44.0.0", "fast-json>=0.3.2", "httpx>=0.27.0", @@ -60,11 +61,9 @@ mcp_cli = ["server_config.json"] [dependency-groups] dev = [ "colorama>=0.4.6", - "mypy>=1.13.0", "pydantic>=2.10.2", "pytest-asyncio>=0.25.3", "pytest-cov>=6.2.1", - "pytest>=8.3.4", "ruff>=0.12.10", ] diff --git a/server_config.json b/server_config.json index 58b6c3b1..a18647e5 100644 --- a/server_config.json +++ b/server_config.json @@ -32,15 +32,40 @@ "@playwright/mcp@latest" ] }, + "telnet": { + "url": "http://127.0.0.1:8001/mcp" + }, "linkedin": { "url": "https://linkedin.chukai.io/mcp" }, "math": { - "command": "uvx", - "args": ["chuk-mcp-math-server"] + "url": "https://math.chukai.io/mcp" }, "weather": { "url": "https://weather.chukai.io/mcp" + }, + "celestial": { + "url": "https://celestial.chukai.io/mcp" + }, + "time": { + "url": "https://time.chukai.io/mcp" + }, + "physics": { + "url": "https://physics.chukai.io/mcp" + }, + "fetch": { + "command": "uvx", + "args": ["mcp-server-fetch"] + }, + "solver": { + "command": "uv", + "args": ["--directory", "/Users/christopherhay/chris-source/chuk-ai/mcp-servers/chuk-mcp-solver", "run", "chuk-mcp-solver", "stdio"] + }, + "powerpoint": { + "url": "https://powerpoint.chukai.io/mcp" + }, + "motion": { + "url": "https://motion.chukai.io/mcp" } } } \ No newline at end of file diff --git a/src/mcp_cli/__main__.py b/src/mcp_cli/__main__.py index 4059edce..b993dfd5 100644 --- a/src/mcp_cli/__main__.py +++ b/src/mcp_cli/__main__.py @@ -8,9 +8,11 @@ import sys import asyncio + from mcp_cli.config import PLATFORM_WINDOWS + # Set 
up proper event loop policy on Windows - if sys.platform == "win32": - asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) + if sys.platform == PLATFORM_WINDOWS: + asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # type: ignore[attr-defined] try: from mcp_cli.main import app diff --git a/src/mcp_cli/adapters/chat.py b/src/mcp_cli/adapters/chat.py index d715c890..2af26348 100644 --- a/src/mcp_cli/adapters/chat.py +++ b/src/mcp_cli/adapters/chat.py @@ -165,17 +165,11 @@ async def handle_command( # Handle special actions if result.should_exit: - # Signal chat mode to exit - if context and "chat_handler" in context: - handler = context["chat_handler"] - if handler and hasattr(handler, "cleanup"): - # Check if cleanup is async - import inspect - - if inspect.iscoroutinefunction(handler.cleanup): - await handler.cleanup() - else: - handler.cleanup() + # Signal chat mode to exit by setting exit_requested on context + if context and "chat_context" in context: + chat_context = context["chat_context"] + if hasattr(chat_context, "exit_requested"): + chat_context.exit_requested = True return True if result.should_clear: @@ -289,12 +283,9 @@ def get_completions(partial_text: str) -> list[str]: return [] # For now, return parameter names as completions - completions = [] - for param in command.parameters: - if not param.is_flag: - completions.append(f"/{command_name} --{param.name}") - else: - completions.append(f"/{command_name} --{param.name}") + completions = [ + f"/{command_name} --{param.name}" for param in command.parameters + ] return completions diff --git a/src/mcp_cli/adapters/cli.py b/src/mcp_cli/adapters/cli.py index 31934f3a..d9a65554 100644 --- a/src/mcp_cli/adapters/cli.py +++ b/src/mcp_cli/adapters/cli.py @@ -59,19 +59,12 @@ def create_typer_command(): annotations = {} for param in command.parameters: - # Create Typer parameter - if param.is_flag: - typer_param = typer.Option( - param.default, - f"--{param.name}", - 
help=param.help, - ) - else: - typer_param = typer.Option( - param.default, - f"--{param.name}", - help=param.help, - ) + typer_param = typer.Option( + param.default, + f"--{param.name}", + help=param.help, + is_flag=param.is_flag, + ) params[param.name] = typer_param annotations[param.name] = param.type @@ -118,9 +111,9 @@ def _register_group(app: typer.Typer, group: CommandGroup) -> None: # Create a sub-app for the group sub_app = typer.Typer(help=group.description) - # Register each subcommand - for subcommand in group.subcommands.values(): - if subcommand.name != subcommand.name: # Skip aliases + # Register each subcommand (skip alias entries) + for key, subcommand in group.subcommands.items(): + if key != subcommand.name: # Skip aliases continue CLICommandAdapter._register_command(sub_app, subcommand) @@ -166,3 +159,71 @@ def create_typer_app() -> typer.Typer: CLICommandAdapter.register_with_typer(app) return app + + +async def cli_execute(command_name: str, **kwargs: Any) -> Any: + """ + Convenience function to execute a unified command from CLI. + + This is a simplified interface for main.py to use when migrating from + legacy action functions to unified commands. 
+ + Args: + command_name: Name of the command to execute + **kwargs: Command parameters + + Returns: + Command result data (or True on success, False on failure) + + Example: + await cli_execute("tools", raw=True, details=False) + """ + from mcp_cli.commands.registry import UnifiedCommandRegistry + + # Get registry instance + cmd_registry = UnifiedCommandRegistry() + + # Look up command in registry + command = cmd_registry.get(command_name, mode=CommandMode.CLI) + + if not command: + output.error(f"Unknown command: {command_name}") + return False + + try: + # Add context if available (don't fail if not initialized) + if command.requires_context: + try: + context = get_context() + if context: + kwargs.setdefault("tool_manager", context.tool_manager) + kwargs.setdefault("model_manager", context.model_manager) + except RuntimeError: + # Context not initialized - command will run without it + pass + + # Execute command + result = await command.execute(**kwargs) + + # Handle result + if result.success: + if result.output: + # Output is already formatted by the command (str or Rich object) + output.print(result.output) + + # Return data for programmatic use + return result.data if result.data else True + + # Handle failure case + else: + if result.error: + output.error(result.error) + else: + output.error(f"Command failed: {command_name}") + + return False + + except Exception as e: + logger.exception(f"Error executing command: {command_name}") + output.error(f"Command error: {str(e)}") + return False diff --git a/src/mcp_cli/adapters/interactive.py b/src/mcp_cli/adapters/interactive.py index 9fb6d49e..eb98ed05 100644 --- a/src/mcp_cli/adapters/interactive.py +++ b/src/mcp_cli/adapters/interactive.py @@ -22,8 +22,6 @@ class InteractiveExitException(Exception): """Custom exception for exiting interactive mode without interfering with pytest.""" - pass - class InteractiveCommandAdapter: """ @@ -67,10 +65,6 @@ async def handle_command(command_line: str) -> bool: args = 
parts[1:] if len(parts) > 1 else [] - # Debug: log what we parsed - import logging - - logger = logging.getLogger(__name__) logger.debug(f"Parsed command: {command_name}, args: {args}") # Look up command in registry @@ -184,14 +178,9 @@ def _parse_arguments(command: Any, args: list[str]) -> dict[str, Any]: i += 1 - # Add positional arguments + # Add positional arguments (always as a list for consistency) if positional: - # If command expects specific positional args, map them - # For now, just add as "args" - if len(positional) == 1: - kwargs["args"] = positional[0] - else: - kwargs["args"] = positional + kwargs["args"] = positional return kwargs diff --git a/src/mcp_cli/async_config.py b/src/mcp_cli/async_config.py deleted file mode 100644 index e8e9ac4d..00000000 --- a/src/mcp_cli/async_config.py +++ /dev/null @@ -1,84 +0,0 @@ -# mcp_cli/async_config.py -""" -from __future__ import annotations - -Async configuration loading for MCP servers using chuk-tool-processor APIs. -""" - -import json -import logging -from pathlib import Path -from typing import Any - - -async def load_server_config( - config_path: str, server_name: str | None = None -) -> dict[str, Any]: - """ - Load the server configuration from a JSON file. - - Uses chuk-tool-processor APIs for server configuration. - """ - try: - # Debug logging - logging.debug(f"Loading config from {config_path}") - - # Read the configuration file - config_file_path = Path(config_path) - if not config_file_path.exists(): - raise FileNotFoundError(f"Configuration file not found: {config_path}") - - with open(config_file_path, "r") as config_file: - config = json.load(config_file) - - # If specific server requested, return just that server's config - if server_name: - server_config = config.get("mcpServers", {}).get(server_name) - if not server_config: - error_msg = f"Server '{server_name}' not found in configuration file." 
- logging.error(error_msg) - raise ValueError(error_msg) - - # Return in format expected by chuk-tool-processor - return { - "command": server_config["command"], - "args": server_config.get("args", []), - "env": server_config.get("env", {}), - } - - # Return entire config for processing multiple servers - result: dict[str, Any] = config - return result - - except FileNotFoundError: - logging.error(f"Configuration file not found: {config_path}") - raise - except json.JSONDecodeError as e: - error_msg = f"Invalid JSON in configuration file: {e.msg}" - logging.error(error_msg) - raise json.JSONDecodeError(error_msg, e.doc, e.pos) - except ValueError as e: - logging.error(str(e)) - raise - - -async def load_all_server_configs(config_path: str) -> dict[str, dict[str, Any]]: - """ - Load all server configurations from a JSON file. - - Returns: - Dictionary mapping server names to their configurations - """ - config = await load_server_config(config_path) - mcp_servers = config.get("mcpServers", {}) - - # Transform to expected format - result = {} - for server_name, server_config in mcp_servers.items(): - result[server_name] = { - "command": server_config["command"], - "args": server_config.get("args", []), - "env": server_config.get("env", {}), - } - - return result diff --git a/src/mcp_cli/auth/provider_tokens.py b/src/mcp_cli/auth/provider_tokens.py index 4d609445..4e1f7393 100644 --- a/src/mcp_cli/auth/provider_tokens.py +++ b/src/mcp_cli/auth/provider_tokens.py @@ -279,9 +279,11 @@ def list_all_provider_tokens( all_providers = list_available_providers() # Check each provider for token status + from mcp_cli.config import PROVIDER_OLLAMA + for provider_name in all_providers.keys(): # Skip ollama (doesn't need tokens) - if provider_name.lower() == "ollama": + if provider_name.lower() == PROVIDER_OLLAMA: continue status = check_provider_token_status(provider_name, token_manager) diff --git a/src/mcp_cli/chat/__init__.py b/src/mcp_cli/chat/__init__.py index 
672360a0..365a4ea1 100644 --- a/src/mcp_cli/chat/__init__.py +++ b/src/mcp_cli/chat/__init__.py @@ -1,4 +1,18 @@ # mcp_cli/chat/__init__.py -from mcp_cli.chat.chat_handler import handle_chat_mode +"""Chat module for mcp-cli. + +Exports are lazy to avoid circular imports with tools.manager. +Use: from mcp_cli.chat.chat_handler import handle_chat_mode +""" + + +def __getattr__(name: str): + """Lazy import to avoid circular dependencies.""" + if name == "handle_chat_mode": + from mcp_cli.chat.chat_handler import handle_chat_mode + + return handle_chat_mode + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + __all__ = ["handle_chat_mode"] diff --git a/src/mcp_cli/chat/__main__.py b/src/mcp_cli/chat/__main__.py index 7bf87379..f9363dc7 100644 --- a/src/mcp_cli/chat/__main__.py +++ b/src/mcp_cli/chat/__main__.py @@ -48,8 +48,10 @@ def restore_terminal(): try: # Close the subprocess if it's still running if obj._proc.poll() is None: + from mcp_cli.config import SHUTDOWN_TIMEOUT + obj._proc.kill() - obj._proc.wait(timeout=0.5) # Short timeout + obj._proc.wait(timeout=SHUTDOWN_TIMEOUT) except Exception as e: logging.debug(f"Error killing subprocess: {e}") @@ -74,8 +76,11 @@ def restore_terminal(): # Wait for tasks to be cancelled (with timeout) try: - if sys.version_info >= (3, 7): - loop.run_until_complete(asyncio.wait(tasks, timeout=0.5)) + from mcp_cli.config import SHUTDOWN_TIMEOUT + + loop.run_until_complete( + asyncio.wait(tasks, timeout=SHUTDOWN_TIMEOUT) + ) except (asyncio.CancelledError, asyncio.TimeoutError): pass # Expected during cancellation diff --git a/src/mcp_cli/chat/chat_context.py b/src/mcp_cli/chat/chat_context.py index 649129f7..362e254e 100644 --- a/src/mcp_cli/chat/chat_context.py +++ b/src/mcp_cli/chat/chat_context.py @@ -1,11 +1,12 @@ # mcp_cli/chat/chat_context.py """ -Clean chat context focused on conversation state and tool coordination. +Chat context using chuk-ai-session-manager as the native conversation backend. 
""" from __future__ import annotations import logging +import uuid from typing import Any, AsyncIterator from chuk_term.ui import output @@ -16,51 +17,90 @@ from mcp_cli.tools.models import ToolInfo, ServerInfo from mcp_cli.model_management import ModelManager +# Native session management from chuk-ai-session-manager +from chuk_ai_session_manager import SessionManager +from chuk_ai_session_manager.models.session_event import SessionEvent +from chuk_ai_session_manager.models.event_source import EventSource +from chuk_ai_session_manager.models.event_type import EventType +from chuk_ai_session_manager.procedural_memory import ( + ToolMemoryManager, + ToolOutcome, + ProceduralContextFormatter, + FormatterConfig, +) + logger = logging.getLogger(__name__) class ChatContext: """ - Chat context focused on conversation state and tool coordination. + Chat context with SessionManager as the native conversation backend. + + SessionManager is required - no fallback to local state. + All conversation tracking flows through chuk-ai-session-manager. Responsibilities: - - Conversation history management + - Conversation history management (via SessionManager) - Tool discovery and adaptation coordination + - Procedural memory for tool learning - Session state (exit requests, etc.) - - Model management is completely delegated to ModelManager. """ - def __init__(self, tool_manager: ToolManager, model_manager: ModelManager): + def __init__( + self, + tool_manager: ToolManager, + model_manager: ModelManager, + session_id: str | None = None, + ): """ Create chat context with required managers. 
Args: tool_manager: Tool management interface model_manager: Model configuration and LLM client manager + session_id: Optional session ID for conversation tracking """ self.tool_manager = tool_manager self.model_manager = model_manager + self.session_id = session_id or self._generate_session_id() + + # Core session manager - always required + self.session: SessionManager = SessionManager(session_id=self.session_id) + self._system_prompt: str = "" + + # Procedural memory for tool learning + self.tool_memory = ToolMemoryManager.create(session_id=self.session_id) + self.procedural_formatter = ProceduralContextFormatter( + config=FormatterConfig( + max_recent_calls=5, + max_errors_per_tool=3, + max_successes_per_tool=2, + include_fix_suggestions=True, + ) + ) - # Conversation state + # Session state self.exit_requested = False - self.conversation_history: list[Message] = [] - self.tool_history: list[ - ToolExecutionRecord - ] = [] # Track tool execution history + self.tool_history: list[ToolExecutionRecord] = [] + + # ToolProcessor back-reference (set by ToolProcessor.__init__) + self.tool_processor: Any = None # Tool state (filled during initialization) self.tools: list[ToolInfo] = [] self.internal_tools: list[ToolInfo] = [] self.server_info: list[ServerInfo] = [] self.tool_to_server_map: dict[str, str] = {} - self.openai_tools: list[ - dict[str, Any] - ] = [] # These remain dicts for OpenAI API + self.openai_tools: list[dict[str, Any]] = [] self.tool_name_mapping: dict[str, str] = {} logger.debug(f"ChatContext created with {self.provider}/{self.model}") + @staticmethod + def _generate_session_id() -> str: + """Generate a unique session ID.""" + return f"chat-{uuid.uuid4().hex[:12]}" + @classmethod def create( cls, @@ -69,7 +109,8 @@ def create( model: str | None = None, api_base: str | None = None, api_key: str | None = None, - model_manager: ModelManager | None = None, # FIXED: Accept model_manager + model_manager: ModelManager | None = None, + session_id: str | 
None = None, ) -> "ChatContext": """ Factory method for convenient creation. @@ -81,33 +122,30 @@ def create( api_base: API base URL override (optional) api_key: API key override (optional) model_manager: Pre-configured ModelManager (optional, creates new if None) + session_id: Session ID for conversation tracking (optional) Returns: Configured ChatContext instance """ - # FIXED: Use provided model_manager if available, otherwise create new if model_manager is None: model_manager = ModelManager() - # Configure provider if API settings provided if provider and (api_base or api_key): model_manager.add_runtime_provider( name=provider, api_key=api_key, api_base=api_base or "" ) - # Switch model if requested if provider and model: model_manager.switch_model(provider, model) elif provider: model_manager.switch_provider(provider) elif model: - # Switch model in current provider current_provider = model_manager.get_active_provider() model_manager.switch_model(current_provider, model) - return cls(tool_manager, model_manager) + return cls(tool_manager, model_manager, session_id) - # ── Properties that delegate to ModelManager ────────────────────────── + # ── Properties ──────────────────────────────────────────────────────── @property def client(self) -> Any: """Get current LLM client (cached automatically by ModelManager).""" @@ -123,12 +161,50 @@ def model(self) -> str: """Current model name.""" return self.model_manager.get_active_model() + @property + def conversation_history(self) -> list[Message]: + """ + Get conversation history as list of Message objects. + + Provides backwards compatibility while using SessionManager internally. + Handles both regular messages and tool-related messages. 
+ """ + messages = [] + + # System prompt first + if self._system_prompt: + messages.append( + Message(role=MessageRole.SYSTEM, content=self._system_prompt) + ) + + # Get events from session + if self.session._session: + for event in self.session._session.events: + if event.type == EventType.MESSAGE: + if event.source == EventSource.USER: + messages.append( + Message(role=MessageRole.USER, content=str(event.message)) + ) + elif event.source in (EventSource.LLM, EventSource.SYSTEM): + messages.append( + Message( + role=MessageRole.ASSISTANT, content=str(event.message) + ) + ) + elif event.type == EventType.TOOL_CALL: + # Tool messages stored as dict - reconstruct Message + if isinstance(event.message, dict): + messages.append(Message.from_dict(event.message)) + + return messages + # ── Initialization ──────────────────────────────────────────────────── async def initialize(self) -> bool: - """Initialize tools and conversation state.""" + """Initialize tools, session, and procedural memory.""" try: await self._initialize_tools() - self._initialize_conversation() + self._generate_system_prompt() + await self._initialize_session() if not self.tools: output.print( @@ -145,32 +221,40 @@ async def initialize(self) -> bool: output.print(f"[red]Error initializing chat context: {exc}[/red]") return False + async def _initialize_session(self) -> None: + """Initialize the session with system prompt.""" + self.session = SessionManager( + session_id=self.session_id, + system_prompt=self._system_prompt, + infinite_context=False, + ) + await self.session._ensure_initialized() + logger.debug(f"Session initialized: {self.session_id}") + + def _generate_system_prompt(self) -> None: + """Generate system prompt from available tools.""" + tools_for_prompt = [ + tool.to_llm_format().to_dict() for tool in self.internal_tools + ] + self._system_prompt = generate_system_prompt(tools_for_prompt) + async def _initialize_tools(self) -> None: """Initialize tool discovery and adaptation.""" - # 
Get tools from ToolManager - already returns ToolInfo objects self.tools = await self.tool_manager.get_unique_tools() logger.debug(f"ChatContext: Initialized with {len(self.tools)} tools") - # Get server info - already returns ServerInfo objects self.server_info = await self.tool_manager.get_server_info() - - # Build tool-to-server mapping using ToolInfo objects self.tool_to_server_map = {t.name: t.namespace for t in self.tools} - # Adapt tools for current provider await self._adapt_tools_for_provider() - - # Keep copy for system prompt self.internal_tools = list(self.tools) def find_tool_by_name(self, name: str) -> ToolInfo | None: """Find a tool by its name (handles both simple and namespaced names).""" - # First try exact match for tool in self.tools: if tool.name == name or tool.fully_qualified_name == name: return tool - # Try partial match (just the tool name without namespace) simple_name = name.split(".")[-1] if "." in name else name for tool in self.tools: if tool.name == simple_name: @@ -198,36 +282,16 @@ async def _adapt_tools_for_provider(self) -> None: f"Adapted {len(self.openai_tools)} tools for {self.provider}" ) else: - # Fallback to generic tools self.openai_tools = await self.tool_manager.get_tools_for_llm() self.tool_name_mapping = {} except Exception as exc: logger.warning(f"Error adapting tools: {exc}") - # Final fallback - use the raw tool format self.openai_tools = await self.tool_manager.get_tools_for_llm() self.tool_name_mapping = {} - def _initialize_conversation(self) -> None: - """Initialize conversation with system prompt.""" - # Convert ToolInfo objects to dicts for system prompt generation - tools_for_prompt = [] - for tool in self.internal_tools: - # Convert to LLM format and then to dict - tools_for_prompt.append(tool.to_llm_format().to_dict()) - - system_prompt = generate_system_prompt(tools_for_prompt) - self.conversation_history = [ - Message(role=MessageRole.SYSTEM, content=system_prompt) - ] - # ── Model change handling 
───────────────────────────────────────────── async def refresh_after_model_change(self) -> None: - """ - Refresh context after ModelManager changes the model. - - Call this after model_manager.switch_model() to update tools. - ModelManager handles client refresh automatically. - """ + """Refresh context after ModelManager changes the model.""" await self._adapt_tools_for_provider() logger.debug(f"ChatContext refreshed for {self.provider}/{self.model}") @@ -248,46 +312,148 @@ async def get_server_for_tool(self, tool_name: str) -> str: return await self.tool_manager.get_server_for_tool(tool_name) or "Unknown" # ── Conversation management ─────────────────────────────────────────── - def add_user_message(self, content: str) -> None: + async def add_user_message(self, content: str) -> None: """Add user message to conversation.""" - self.conversation_history.append( - Message(role=MessageRole.USER, content=content) - ) + await self.session.user_says(content) + logger.debug(f"User message added: {content[:50]}...") - def add_assistant_message(self, content: str) -> None: + async def add_assistant_message(self, content: str) -> None: """Add assistant message to conversation.""" - self.conversation_history.append( - Message(role=MessageRole.ASSISTANT, content=content) + await self.session.ai_responds( + content, model=self.model, provider=self.provider + ) + logger.debug(f"Assistant message added: {content[:50]}...") + + def inject_assistant_message(self, content: str) -> None: + """ + Inject a synthetic assistant message for conversation flow control. + + Use this for system-generated messages (budget exhaustion, state summaries, + error recovery) that guide the model but aren't true AI responses. 
+ """ + event = SessionEvent( + message=content, + source=EventSource.SYSTEM, + type=EventType.MESSAGE, + ) + self.session._session.events.append(event) + logger.debug(f"Injected assistant message: {content[:50]}...") + + def inject_tool_message(self, message: Message) -> None: + """ + Inject a tool-related message into conversation history. + + Tool messages (assistant with tool_calls, tool results) have special structure + that doesn't map to SessionManager events. These are stored as raw events + for conversation flow but tracked separately in procedural memory. + """ + # Store as TOOL_CALL event with the full message structure + event = SessionEvent( + message=message.to_dict(), + source=EventSource.SYSTEM, + type=EventType.TOOL_CALL, + ) + self.session._session.events.append(event) + logger.debug(f"Injected tool message: role={message.role}") + + async def record_tool_call( + self, + tool_name: str, + arguments: dict[str, Any], + result: Any, + success: bool = True, + error: str | None = None, + context_goal: str | None = None, + ) -> None: + """ + Record a tool call in session and procedural memory. 
+ + Args: + tool_name: Name of the tool called + arguments: Arguments passed to the tool + result: Result returned by the tool + success: Whether the call succeeded + error: Error message if failed + context_goal: What the user was trying to accomplish + """ + # Record in session + await self.session.tool_used( + tool_name=tool_name, + arguments=arguments, + result=result, + error=error, + ) + + # Record in procedural memory for learning + outcome = ToolOutcome.SUCCESS if success else ToolOutcome.FAILURE + error_type = ( + type(error).__name__ if error and not isinstance(error, str) else None + ) + + await self.tool_memory.record_call( + tool_name=tool_name, + arguments=arguments, + result=result, + outcome=outcome, + context_goal=context_goal, + error_type=error_type, + error_message=str(error) if error else None, + ) + + logger.debug(f"Tool call recorded: {tool_name} (success={success})") + + async def get_messages_for_llm(self) -> list[dict[str, str]]: + """Get messages formatted for LLM API calls.""" + result: list[dict[str, str]] = await self.session.get_messages_for_llm( + include_system=True ) + return result def get_conversation_length(self) -> int: """Get conversation length (excluding system prompt).""" - return max(0, len(self.conversation_history) - 1) - - def clear_conversation_history(self, keep_system_prompt: bool = True) -> None: - """Clear conversation history.""" - if ( - keep_system_prompt - and self.conversation_history - and self.conversation_history[0].role == MessageRole.SYSTEM - ): - system_prompt = self.conversation_history[0] - self.conversation_history = [system_prompt] - else: - self.conversation_history = [] - - def regenerate_system_prompt(self) -> None: - """Regenerate system prompt with current tools.""" - system_prompt = generate_system_prompt(self.internal_tools) - if ( - self.conversation_history - and self.conversation_history[0].role == MessageRole.SYSTEM - ): - self.conversation_history[0].content = system_prompt - else: - 
self.conversation_history.insert( - 0, Message(role=MessageRole.SYSTEM, content=system_prompt) + if self.session._session: + return sum( + 1 for e in self.session._session.events if e.type == EventType.MESSAGE ) + return 0 + + async def clear_conversation_history(self, keep_system_prompt: bool = True) -> None: + """Clear conversation history by creating a new session.""" + self.session_id = self._generate_session_id() + await self._initialize_session() + self.tool_memory = ToolMemoryManager.create(session_id=self.session_id) + logger.debug(f"Conversation cleared, new session: {self.session_id}") + + async def regenerate_system_prompt(self) -> None: + """Regenerate system prompt with current tools.""" + self._generate_system_prompt() + await self.session.update_system_prompt(self._system_prompt) + + # ── Procedural memory helpers ───────────────────────────────────────── + def get_procedural_context_for_tools( + self, tool_names: list[str], context_goal: str | None = None + ) -> str: + """ + Get procedural memory context for tools about to be called. + + Use this to inject relevant tool history before making LLM calls. 
+ """ + result: str = self.procedural_formatter.format_for_tools( + self.tool_memory, tool_names, context_goal + ) + return result + + def get_recent_tool_history(self, limit: int = 5) -> list[dict[str, Any]]: + """Get recent tool call history from procedural memory.""" + return [ + { + "tool": entry.tool_name, + "arguments": entry.arguments, + "outcome": entry.outcome.value, + "timestamp": entry.timestamp.isoformat(), + } + for entry in self.tool_memory.memory.tool_log[-limit:] + ] # ── Simple getters ──────────────────────────────────────────────────── def get_tool_count(self) -> int: @@ -303,7 +469,7 @@ def get_display_name_for_tool(namespaced_tool_name: str) -> str: """Get display name for tool.""" return namespaced_tool_name - # ── Serialization (simplified) ──────────────────────────────────────── + # ── Serialization ───────────────────────────────────────────────────── def to_dict(self) -> dict[str, Any]: """Export context for command handlers.""" return { @@ -322,26 +488,17 @@ def to_dict(self) -> dict[str, Any]: "exit_requested": self.exit_requested, "tool_to_server_map": self.tool_to_server_map, "tool_manager": self.tool_manager, + "session_id": self.session_id, } def update_from_dict(self, context_dict: dict[str, Any]) -> None: - """Update context from dictionary (simplified).""" - # Core state updates + """Update context from dictionary.""" if "exit_requested" in context_dict: self.exit_requested = context_dict["exit_requested"] - if "conversation_history" in context_dict: - history = context_dict["conversation_history"] - # Handle both list of dicts and list of Message objects - if history and isinstance(history[0], dict): - self.conversation_history = [Message.from_dict(msg) for msg in history] - else: - self.conversation_history = history - if "model_manager" in context_dict: self.model_manager = context_dict["model_manager"] - # Tool state updates (for command handlers that modify tools) for key in [ "tools", "internal_tools", @@ -362,7 +519,7 @@ 
async def __aenter__(self): async def __aexit__(self, exc_type, exc_val, exc_tb): """Async context manager exit.""" - pass # ModelManager handles its own persistence + pass # ── Debug info ──────────────────────────────────────────────────────── def get_status_summary(self) -> ChatStatus: @@ -377,110 +534,16 @@ def get_status_summary(self) -> ChatStatus: tool_execution_count=len(self.tool_history), ) + async def get_session_stats(self) -> dict[str, Any]: + """Get session statistics.""" + result: dict[str, Any] = await self.session.get_stats() + return result + def __repr__(self) -> str: return ( - f"ChatContext(provider='{self.provider}', model='{self.model}', " - f"tools={len(self.tools)}, messages={self.get_conversation_length()})" + f"ChatContext(session='{self.session_id}', provider='{self.provider}', " + f"model='{self.model}', tools={len(self.tools)}, messages={self.get_conversation_length()})" ) def __str__(self) -> str: - return f"Chat session with {self.provider}/{self.model} ({len(self.tools)} tools, {self.get_conversation_length()} messages)" - - -# ═══════════════════════════════════════════════════════════════════════════════════ -# For testing - separate class to keep main ChatContext clean -# ═══════════════════════════════════════════════════════════════════════════════════ - - -class TestChatContext(ChatContext): - """ - Test-specific ChatContext that works with stream_manager instead of ToolManager. - - Separated from main ChatContext to keep it clean. 
- """ - - def __init__(self, stream_manager: Any, model_manager: ModelManager): - """Create test context with stream_manager.""" - # Initialize base attributes without calling super().__init__ - self.tool_manager = None # type: ignore[assignment] # Tests don't use ToolManager - self.stream_manager = stream_manager - self.model_manager = model_manager - - # Conversation state - self.exit_requested = False - self.conversation_history = [] - - # Tool state - self.tools = [] - self.internal_tools = [] - self.server_info = [] - self.tool_to_server_map = {} - self.openai_tools = [] - self.tool_name_mapping = {} - - logger.debug(f"TestChatContext created with {self.provider}/{self.model}") - - @classmethod - def create_for_testing( - cls, - stream_manager: Any, - provider: str | None = None, - model: str | None = None, - ) -> "TestChatContext": - """Factory for test contexts.""" - model_manager = ModelManager() - - if provider and model: - model_manager.switch_model(provider, model) - elif provider: - model_manager.switch_provider(provider) - elif model: - # Switch model in current provider - current_provider = model_manager.get_active_provider() - model_manager.switch_model(current_provider, model) - - return cls(stream_manager, model_manager) - - async def _initialize_tools(self) -> None: - """Test-specific tool initialization.""" - # Get tools from stream_manager - if hasattr(self.stream_manager, "get_internal_tools"): - self.tools = list(self.stream_manager.get_internal_tools()) - else: - self.tools = list(self.stream_manager.get_all_tools()) - - # Get server info - self.server_info = list(self.stream_manager.get_server_info()) - - # Build mappings - tools are ToolInfo objects - self.tool_to_server_map = { - t.name: self.stream_manager.get_server_for_tool(t.name) for t in self.tools - } - - # Convert tools to OpenAI format for tests - self.openai_tools = [ - { - "type": "function", - "function": { - "name": t.name, - "description": t.description or "", - "parameters": 
t.parameters or {}, - }, - } - for t in self.tools - ] - self.tool_name_mapping = {} - - # Copy for system prompt - self.internal_tools = list(self.tools) - - async def execute_tool(self, tool_name: str, arguments: dict[str, Any]) -> Any: - """Execute tool via stream_manager.""" - if hasattr(self.stream_manager, "call_tool"): - return await self.stream_manager.call_tool(tool_name, arguments) - else: - raise ValueError("Stream manager doesn't support tool execution") - - async def get_server_for_tool(self, tool_name: str) -> str: - """Get server for tool from stream_manager.""" - return self.stream_manager.get_server_for_tool(tool_name) or "Unknown" + return f"Chat session {self.session_id} with {self.provider}/{self.model} ({len(self.tools)} tools)" diff --git a/src/mcp_cli/chat/chat_handler.py b/src/mcp_cli/chat/chat_handler.py index bb920c3d..97edc36d 100644 --- a/src/mcp_cli/chat/chat_handler.py +++ b/src/mcp_cli/chat/chat_handler.py @@ -5,6 +5,7 @@ from __future__ import annotations +import asyncio import gc import logging @@ -17,12 +18,14 @@ ) # Local imports -from mcp_cli.chat.chat_context import ChatContext, TestChatContext +from mcp_cli.chat.chat_context import ChatContext +from mcp_cli.chat.testing import TestChatContext from mcp_cli.chat.ui_manager import ChatUIManager from mcp_cli.chat.conversation import ConversationProcessor from mcp_cli.tools.manager import ToolManager from mcp_cli.context import initialize_context from mcp_cli.config import initialize_config +from mcp_cli.config.defaults import DEFAULT_PROVIDER, DEFAULT_MODEL # Set up logger logger = logging.getLogger(__name__) @@ -35,12 +38,18 @@ async def handle_chat_mode( api_base: str | None = None, api_key: str | None = None, confirm_mode: str | None = None, - max_turns: int = 30, + max_turns: int = 100, model_manager=None, # FIXED: Accept model_manager from caller + runtime_config=None, # RuntimeConfig | None ) -> bool: """ Launch the interactive chat loop with streaming support. 
+ Runtime uses adaptive policy: strict core with smooth wrapper. + - Always enforces grounding rules (no ungrounded numeric calls) + - Automatically attempts to repair blocked calls (rebind, symbolic fallback) + - Only surfaces errors when all repair options exhausted + Args: tool_manager: Initialized ToolManager instance provider: Provider to use (optional, uses ModelManager active if None) @@ -48,8 +57,9 @@ async def handle_chat_mode( api_base: API base URL override (optional) api_key: API key override (optional) confirm_mode: Tool confirmation mode override (optional) - max_turns: Maximum conversation turns before forcing exit (default: 30) + max_turns: Maximum conversation turns before forcing exit (default: 100) model_manager: Pre-configured ModelManager (optional, creates new if None) + runtime_config: Runtime configuration with timeout overrides (optional) Returns: True if session ended normally, False on failure @@ -65,8 +75,8 @@ async def handle_chat_mode( # Initialize global context manager for commands to work app_context = initialize_context( tool_manager=tool_manager, - provider=provider or "openai", - model=model or "gpt-4", + provider=provider or DEFAULT_PROVIDER, + model=model or DEFAULT_MODEL, api_base=api_base, api_key=api_key, model_manager=model_manager, # FIXED: Pass model_manager with runtime providers @@ -131,7 +141,7 @@ async def handle_chat_mode( # UI and conversation processor ui = ChatUIManager(ctx) - convo = ConversationProcessor(ctx, ui) + convo = ConversationProcessor(ctx, ui, runtime_config) # Main chat loop with streaming support await _run_enhanced_chat_loop(ui, ctx, convo, max_turns) @@ -170,7 +180,8 @@ async def handle_chat_mode_for_testing( stream_manager, provider: str | None = None, model: str | None = None, - max_turns: int = 30, + max_turns: int = 100, + runtime_config=None, # RuntimeConfig | None ) -> bool: """ Launch chat mode for testing with stream_manager. 
@@ -181,7 +192,8 @@ async def handle_chat_mode_for_testing( stream_manager: Test stream manager provider: Provider for testing model: Model for testing - max_turns: Maximum conversation turns before forcing exit (default: 30) + max_turns: Maximum conversation turns before forcing exit (default: 100) + runtime_config: Runtime configuration with timeout overrides (optional) Returns: True if session ended normally, False on failure @@ -207,7 +219,7 @@ async def handle_chat_mode_for_testing( # UI and conversation processor ui = ChatUIManager(ctx) - convo = ConversationProcessor(ctx, ui) + convo = ConversationProcessor(ctx, ui, runtime_config) # Main chat loop with streaming support await _run_enhanced_chat_loop(ui, ctx, convo, max_turns) @@ -233,7 +245,7 @@ async def _run_enhanced_chat_loop( ui: ChatUIManager, ctx: ChatContext, convo: ConversationProcessor, - max_turns: int = 30, + max_turns: int = 100, ) -> None: """ Run the main chat loop with enhanced streaming support. @@ -242,7 +254,7 @@ async def _run_enhanced_chat_loop( ui: UI manager with streaming coordination ctx: Chat context convo: Conversation processor with streaming support - max_turns: Maximum conversation turns before forcing exit (default: 30) + max_turns: Maximum conversation turns before forcing exit (default: 100) """ while True: try: @@ -281,13 +293,16 @@ async def _run_enhanced_chat_loop( # Normal conversation turn with streaming support if ui.verbose_mode: ui.print_user_message(user_msg) - ctx.add_user_message(user_msg) + await ctx.add_user_message(user_msg) # Use the enhanced conversation processor that handles streaming await convo.process_conversation(max_turns=max_turns) - except KeyboardInterrupt: - # Handle Ctrl+C gracefully + except (KeyboardInterrupt, asyncio.CancelledError): + # Handle Ctrl+C gracefully (KeyboardInterrupt or asyncio.CancelledError in async code) + logger.info( + f"Interrupt in chat loop - streaming={ui.is_streaming_response}, tools_running={ui.tools_running}" + ) if 
ui.is_streaming_response: output.warning("\nStreaming interrupted - type 'exit' to quit.") ui.interrupt_streaming() @@ -296,6 +311,9 @@ async def _run_enhanced_chat_loop( ui._interrupt_now() else: output.warning("\nInterrupted - type 'exit' to quit.") + # CRITICAL: Continue the loop instead of exiting + logger.info("Continuing chat loop after interrupt...") + continue except EOFError: output.panel("EOF detected - exiting chat.", style="red", title="Exit") break @@ -316,7 +334,7 @@ async def _safe_cleanup(ui: ChatUIManager) -> None: # Stop any streaming responses if ui.is_streaming_response: ui.interrupt_streaming() - ui.stop_streaming_response() + ui.stop_streaming_response_sync() # Stop any tool execution if ui.tools_running: diff --git a/src/mcp_cli/chat/command_completer.py b/src/mcp_cli/chat/command_completer.py index 111f646c..1819fdd3 100644 --- a/src/mcp_cli/chat/command_completer.py +++ b/src/mcp_cli/chat/command_completer.py @@ -17,37 +17,39 @@ def get_completions(self, document, complete_event): # Ensure commands are registered register_all_commands() - txt = document.text.lstrip() - if not txt.startswith("/"): + # Get text before cursor + text = document.text_before_cursor.lstrip() + + # Only complete if we're typing a command (starts with /) + if not text or not text.startswith("/"): return # Get unified commands registry = UnifiedCommandRegistry() commands = registry.list_commands(mode=CommandMode.CHAT) - # Generate completions - for cmd in commands: - # Check if this command matches the partial text - if f"/{cmd.name}".startswith(txt): - # Calculate the replacement text (only the part not yet typed) - replacement = f"/{cmd.name}"[len(txt) :] + # Calculate start position - how far back to replace + start_pos = -len(text) + # Show all matching commands + for cmd in commands: + cmd_text = f"/{cmd.name}" + # Check if this command matches what's been typed + if cmd_text.startswith(text): yield Completion( - replacement, - start_position=0, - 
display=f"/{cmd.name}", - display_meta=cmd.description[:40] - if len(cmd.description) > 40 - else cmd.description, + cmd_text, + start_position=start_pos, + display=cmd_text, + display_meta=cmd.description, ) # Also check aliases for alias in cmd.aliases: - if f"/{alias}".startswith(txt) and alias != cmd.name: - replacement = f"/{alias}"[len(txt) :] + alias_text = f"/{alias}" + if alias_text.startswith(text) and alias != cmd.name: yield Completion( - replacement, - start_position=0, - display=f"/{alias}", + alias_text, + start_position=start_pos, + display=alias_text, display_meta=f"→ /{cmd.name}", ) diff --git a/src/mcp_cli/chat/commands/__init__.py b/src/mcp_cli/chat/commands/__init__.py deleted file mode 100644 index b5aad036..00000000 --- a/src/mcp_cli/chat/commands/__init__.py +++ /dev/null @@ -1,152 +0,0 @@ -# mcp_cli/chat/commands/__init__.py -""" -from __future__ import annotations - -Command handling system for the MCP CLI chat interface. -""" - -from typing import Any, Callable, Awaitable - -# Type for command handlers -CommandHandler = Callable[[list[str]], Awaitable[bool]] - -# Global registries -_COMMAND_HANDLERS: dict[str, CommandHandler] = {} -_COMMAND_COMPLETIONS: dict[str, list[str]] = {} -_COMMAND_ALIASES: dict[str, str] = {} - - -def register_command( - command: str, handler: CommandHandler, completions: list[str] | None = None -) -> None: - """ - Register a command handler. - - Args: - command: The command (starting with /) to register. - handler: Async function that handles the command. - completions: List of completion options for this command. - """ - if not command.startswith("/"): - raise ValueError(f"Command {command} must start with /") - - # Register the handler - _COMMAND_HANDLERS[command] = handler - - # Register completion options if provided - if completions: - _COMMAND_COMPLETIONS[command] = completions - - -def register_alias(alias: str, target: str) -> None: - """ - Register an alias for an existing command. 
- - Args: - alias: The alias command (starting with /) - target: The target command it points to (starting with /) - """ - if not alias.startswith("/") or not target.startswith("/"): - raise ValueError("Both alias and target must start with /") - - if target not in _COMMAND_HANDLERS: - raise ValueError(f"Cannot create alias to unknown command: {target}") - - _COMMAND_ALIASES[alias] = target - - # Also copy any completions - if target in _COMMAND_COMPLETIONS: - _COMMAND_COMPLETIONS[alias] = _COMMAND_COMPLETIONS[target] - - -async def handle_command( - command_text: str, context: dict[str, Any] | None = None -) -> bool: - """ - Handle a command and return whether it was handled. - - Args: - command_text: The full command text (starting with /). - context: Optional context dictionary for backwards compatibility. - - Returns: - bool: True if the command was handled, False otherwise. - """ - # Split the command and arguments - parts = command_text.split() - if not parts: - return False - - cmd = parts[0].lower() - - # Check if it's an alias and resolve it - if cmd in _COMMAND_ALIASES: - cmd = _COMMAND_ALIASES[cmd] - # Replace the command part with the resolved alias - parts[0] = cmd - - # Look up the handler - handler = _COMMAND_HANDLERS.get(cmd) - if not handler: - return False - - # Call the handler with just args (no context parameter) - return await handler(parts) - - -def get_command_completions(partial_text: str) -> list[str]: - """ - Get command completions for a partial command. - - Args: - partial_text: The partially typed command. - - Returns: - List of possible completions. 
- """ - completions = [] - - # Split into command and arguments - parts = partial_text.strip().split(maxsplit=1) - cmd = parts[0].lower() if parts else "" - has_arg = len(parts) > 1 - - # If no specific argument, suggest commands that start with the partial input - if not has_arg: - for command in _COMMAND_HANDLERS: - if command.startswith(cmd): - completions.append(command) - - # Also add aliases - for alias in _COMMAND_ALIASES: - if alias.startswith(cmd): - completions.append(alias) - - # If we have an argument, suggest completions for this specific command - elif cmd in _COMMAND_COMPLETIONS: - arg_part = parts[1].strip() if len(parts) > 1 else "" - for completion in _COMMAND_COMPLETIONS[cmd]: - # Handle parameter placeholders like - if completion.startswith("<") and completion.endswith(">"): - completions.append(f"{cmd} {completion}") - # Regular completions that match the partial argument - elif completion.startswith(arg_part): - completions.append(f"{cmd} {completion}") - - return completions - - -# Import any built-in command modules here -# This allows them to self-register their commands - - -def _import_submodules(): - """Import all submodules to allow them to register their commands.""" - # DISABLED: All commands have been migrated to the unified system - # This function is kept for backward compatibility only - pass - - -# Note: All command implementations have been migrated to the unified system -# This module is kept for backward compatibility only -# Actual commands are in mcp_cli.commands.impl.* diff --git a/src/mcp_cli/chat/conversation.py b/src/mcp_cli/chat/conversation.py index 49739626..71e84f84 100644 --- a/src/mcp_cli/chat/conversation.py +++ b/src/mcp_cli/chat/conversation.py @@ -1,18 +1,29 @@ -# mcp_cli/chat/conversation.py - FIXED VERSION -""" -from __future__ import annotations +"""mcp_cli.chat.conversation - FIXED VERSION FIXED: Updated to work with the new OpenAI client universal tool compatibility system. 
+Clean Pydantic models - no dictionary goop! + +ENHANCED: Added tool state management to prevent "model getting lost": +- Caches tool results so duplicates return cached values +- Injects compact state summaries back to the model +- Continues conversation instead of aborting on duplicate calls """ +from __future__ import annotations + import time import asyncio import logging from chuk_term.ui import output -# mcp cli imports -from mcp_cli.chat.models import Message, MessageRole +# mcp cli imports - using chuk_llm canonical models +from mcp_cli.chat.response_models import ( + CompletionResponse, + Message, + MessageRole, +) from mcp_cli.chat.tool_processor import ToolProcessor +from chuk_ai_session_manager.guards import get_tool_state log = logging.getLogger(__name__) @@ -22,27 +33,88 @@ class ConversationProcessor: Class to handle LLM conversation processing with streaming support. Updated to work with universal tool compatibility system. + + ENHANCED: Now includes tool state management to prevent "model getting lost": + - Tracks tool call results in a cache + - Returns cached values on duplicate calls instead of aborting + - Injects state summaries to help model track computed values """ - def __init__(self, context, ui_manager): + # Tool name patterns that are polling/status tools - exempt from loop detection + # These tools are expected to be called repeatedly with the same args + POLLING_TOOL_PATTERNS = frozenset( + { + "status", + "poll", + "check", + "monitor", + "watch", + "wait", + "progress", + "state", + } + ) + + def __init__( + self, + context, + ui_manager, + runtime_config=None, + ): self.context = context self.ui_manager = ui_manager self.tool_processor = ToolProcessor(context, ui_manager) + # Store runtime_config for passing to streaming handler + self.runtime_config = runtime_config + # Tool state manager for caching and variable binding + self._tool_state = get_tool_state() + # Counter for consecutive duplicate detections (for escalation) + 
self._consecutive_duplicate_count = 0 + self._max_consecutive_duplicates = 5 # Abort after this many + # Runtime uses adaptive policy: strict core with smooth wrapper + # No mode selection needed - always enforces grounding with auto-repair + + def _is_polling_tool(self, tool_name: str) -> bool: + """Check if a tool is a polling/status tool that should be exempt from loop detection. + + Polling tools (like render_status, check_progress, etc.) are expected to be called + repeatedly with the same arguments to monitor changing state. These should not + trigger the duplicate call detection. + """ + tool_lower = tool_name.lower() + for pattern in self.POLLING_TOOL_PATTERNS: + if pattern in tool_lower: + return True + return False - async def process_conversation(self, max_turns: int = 30): + async def process_conversation(self, max_turns: int = 100): """Process the conversation loop, handling tool calls and responses with streaming. Args: - max_turns: Maximum number of conversation turns before forcing exit (default: 30) + max_turns: Maximum number of conversation turns before forcing exit (default: 100) """ turn_count = 0 - last_tool_signature = None # Track last tool call to detect duplicates + last_tool_signature = None # Track last tool call to detect true duplicates tools_for_completion = None # Will be set based on context + + # Reset tool state for this new prompt + self._tool_state.reset_for_new_prompt() + + # Advance search engine turn for session boosting + # Tools used recently get boosted in search results + from chuk_tool_processor.discovery import get_search_engine + + search_engine = get_search_engine() + search_engine.advance_turn() + + # Register user literals from the latest user message + # This whitelists numbers from the user prompt so they pass ungrounded checks + self._register_user_literals_from_history() + try: while turn_count < max_turns: try: turn_count += 1 - start_time = time.time() # Skip slash commands (already handled by UI) last_msg = ( 
@@ -76,9 +148,13 @@ async def process_conversation(self, max_turns: int = 30): # Log last few messages for debugging (truncated) for i, msg in enumerate(self.context.conversation_history[-3:]): - role = msg.role if hasattr(msg, 'role') else msg.get('role', 'unknown') - content_preview = str(msg.content)[:100] if hasattr(msg, 'content') else str(msg.get('content', ''))[:100] - log.debug(f" Message {history_size - 3 + i}: role={role}, content_preview={content_preview}") + role = ( + msg.role if isinstance(msg, Message) else MessageRole.USER + ) + content_preview = str(msg.content)[:100] if msg.content else "" + log.debug( + f" Message {history_size - 3 + i}: role={role}, content_preview={content_preview}" + ) # Check if client supports streaming client = self.context.client @@ -98,7 +174,7 @@ async def process_conversation(self, max_turns: int = 30): log.debug(f"Could not inspect signature: {e}") supports_streaming = False - completion = None + completion: CompletionResponse | None = None if supports_streaming: # Use streaming response handler @@ -122,57 +198,230 @@ async def process_conversation(self, max_turns: int = 30): tools=tools_for_completion ) - response_content = completion.get("response", "No response") - tool_calls = completion.get("tool_calls", []) + # Use Pydantic model properties instead of dict.get() + response_content = completion.response or "No response" + tool_calls = completion.tool_calls + reasoning_content = completion.reasoning_content + + # Trace-level logging for completion results + log.debug("=== COMPLETION RESULT ===") + log.debug( + f"Response length: {len(response_content) if response_content else 0}" + ) + log.debug( + f"Tool calls count: {len(tool_calls) if tool_calls else 0}" + ) + log.debug( + f"Reasoning length: {len(reasoning_content) if reasoning_content else 0}" + ) + if response_content and response_content != "No response": + log.debug(f"Response preview: {response_content[:200]}") + if tool_calls: + for i, tc in 
enumerate(tool_calls): + log.debug( + f"Tool call {i}: {tc.function.name} args={tc.function.arguments}" + ) # If model requested tool calls, execute them if tool_calls and len(tool_calls) > 0: log.debug(f"Processing {len(tool_calls)} tool calls from LLM") + # Check split budgets for each tool call type + # Get name mapping for looking up actual tool names + name_mapping = getattr(self.context, "tool_name_mapping", {}) + + # Check if any discovery tools would exceed budget + # Uses behavior-based classification (pattern matching + result shape) + discovery_tools_requested = [] + execution_tools_requested = [] + + for tc in tool_calls: + tool_name = name_mapping.get( + tc.function.name, tc.function.name + ) + if self._tool_state.is_discovery_tool(tool_name): + discovery_tools_requested.append(tool_name) + elif self._tool_state.is_execution_tool(tool_name): + execution_tools_requested.append(tool_name) + + # Check discovery budget first + if discovery_tools_requested: + disc_status = self._tool_state.check_runaway( + discovery_tools_requested[0] + ) + if disc_status.should_stop and "Discovery" in ( + disc_status.reason or "" + ): + log.warning( + f"Discovery budget exhausted: {disc_status.reason}" + ) + output.warning( + "⚠ Discovery budget exhausted - no more searching" + ) + + stop_msg = self._tool_state.format_discovery_exhausted_message() + self.context.inject_assistant_message(stop_msg) + + if self.ui_manager.is_streaming_response: + await self.ui_manager.stop_streaming_response() + if hasattr(self.ui_manager, "streaming_handler"): + self.ui_manager.streaming_handler = None + continue + + # Check execution budget + if execution_tools_requested: + exec_status = self._tool_state.check_runaway( + execution_tools_requested[0] + ) + if exec_status.should_stop and "Execution" in ( + exec_status.reason or "" + ): + log.warning( + f"Execution budget exhausted: {exec_status.reason}" + ) + output.warning( + "⚠ Execution budget exhausted - no more tool calls" + ) + + 
stop_msg = self._tool_state.format_execution_exhausted_message() + self.context.inject_assistant_message(stop_msg) + + if self.ui_manager.is_streaming_response: + await self.ui_manager.stop_streaming_response() + if hasattr(self.ui_manager, "streaming_handler"): + self.ui_manager.streaming_handler = None + continue + + # Check general runaway status (combined budget, saturation, etc.) + runaway_status = self._tool_state.check_runaway() + if runaway_status.should_stop: + log.warning(f"Runaway detected: {runaway_status.reason}") + output.warning(f"⚠ {runaway_status.message}") + + # Generate appropriate stop message + if runaway_status.budget_exhausted: + stop_msg = ( + self._tool_state.format_budget_exhausted_message() + ) + elif runaway_status.saturation_detected: + last_val = ( + self._tool_state._recent_numeric_results[-1] + if self._tool_state._recent_numeric_results + else 0.0 + ) + stop_msg = self._tool_state.format_saturation_message( + last_val + ) + else: + stop_msg = ( + f"**Tool execution stopped**: {runaway_status.reason}\n\n" + f"{self._tool_state.format_state_for_model()}\n\n" + "Please provide your final answer using the computed values above." + ) + + # Inject stop message and continue without tools + self.context.inject_assistant_message(stop_msg) + + # Stop streaming UI and continue to get final answer + if self.ui_manager.is_streaming_response: + await self.ui_manager.stop_streaming_response() + if hasattr(self.ui_manager, "streaming_handler"): + self.ui_manager.streaming_handler = None + + # Continue to next iteration - model will see stop message + # and should provide final answer + continue + # Check if we're at max turns if turn_count >= max_turns: output.warning( f"Maximum conversation turns ({max_turns}) reached. Stopping to prevent infinite loop." ) - self.context.conversation_history.append( - Message( - role=MessageRole.ASSISTANT, - content="I've reached the maximum number of conversation turns. 
The tool results have been provided above.", - ) + self.context.inject_assistant_message( + "I've reached the maximum number of conversation turns. The tool results have been provided above." ) + # Stop streaming UI before breaking + if self.ui_manager.is_streaming_response: + await self.ui_manager.stop_streaming_response() + if hasattr(self.ui_manager, "streaming_handler"): + self.ui_manager.streaming_handler = None break # Create signature to detect duplicate tool calls - import json - + # ToolCall is a Pydantic model from chuk_llm with frozen function current_signature = [] + tool_names = [] for tc in tool_calls: - if hasattr(tc, "function"): - name = getattr(tc.function, "name", "") - args = getattr(tc.function, "arguments", "") - elif isinstance(tc, dict) and "function" in tc: - name = tc["function"].get("name", "") - args = tc["function"].get("arguments", "") - else: - continue - if isinstance(args, dict): - args = json.dumps(args, sort_keys=True) + name = tc.function.name + args = tc.function.arguments # JSON string from chuk_llm current_signature.append(f"{name}:{args}") + tool_names.append(name) current_sig_str = "|".join(sorted(current_signature)) - # If this is a duplicate, stop looping and return control to user - if ( + # Check if ALL tools in this call are polling tools + # If so, exempt from duplicate detection + all_polling = all(self._is_polling_tool(n) for n in tool_names) + + # Detect TRUE duplicates: same tool(s) with exact same args + # Different args = different computation, not stuck + # Polling tools are exempt - they're meant to be called repeatedly + is_true_duplicate: bool = bool( last_tool_signature and current_sig_str == last_tool_signature - ): + and not all_polling + ) + + log.debug( + f"Duplicate check: sig={current_sig_str[:50]}, " + f"is_dup={is_true_duplicate}, all_polling={all_polling}" + ) + + if is_true_duplicate: + # True duplicate: same tool with same args + self._consecutive_duplicate_count += 1 log.warning( - f"Duplicate 
tool call detected: {current_sig_str}" + f"Duplicate tool call detected ({self._consecutive_duplicate_count}x): {current_sig_str[:100]}" ) + + # Check if we've exceeded max duplicates (safety valve) + if ( + self._consecutive_duplicate_count + >= self._max_consecutive_duplicates + ): + output.warning( + f"Model called exact same tool {self._consecutive_duplicate_count} times in a row.\n" + "This indicates the model is stuck. Returning to prompt." + ) + # CRITICAL: Stop streaming UI before breaking + if self.ui_manager.is_streaming_response: + await self.ui_manager.stop_streaming_response() + if hasattr(self.ui_manager, "streaming_handler"): + self.ui_manager.streaming_handler = None + break + + # Inject state summary to help model use cached values output.info( - "Tool has already been executed. Results are shown above." + "Detected repeated tool call. Using cached results and providing state summary." ) - break + state_summary = self._tool_state.format_state_for_model() + if state_summary: + state_msg = ( + "**Previously computed values (use these directly):**\n\n" + f"{state_summary}\n\n" + "Continue with the calculation using these stored values. " + "Do not re-call tools for values already computed." 
+ ) + self.context.inject_assistant_message(state_msg) + log.info( + f"Injected state summary: {state_summary[:200]}" + ) + + # Continue to next iteration - model will see the state + continue + else: + # Not a duplicate, reset counter + self._consecutive_duplicate_count = 0 last_tool_signature = current_sig_str @@ -186,7 +435,9 @@ async def process_conversation(self, max_turns: int = 30): # Process tool calls - this will handle streaming display await self.tool_processor.process_tool_calls( - tool_calls, name_mapping + tool_calls, + name_mapping, + reasoning_content=reasoning_content, ) continue @@ -194,25 +445,48 @@ async def process_conversation(self, max_turns: int = 30): last_tool_signature = None # Display assistant response (if not already displayed by streaming) - elapsed = completion.get("elapsed_time", time.time() - start_time) + elapsed = completion.elapsed_time - if not completion.get("streaming", False): + if not completion.streaming: # Non-streaming response, display normally - self.ui_manager.print_assistant_response( + await self.ui_manager.print_assistant_message( response_content, elapsed ) else: - # Streaming response - final display already handled by finish_streaming() - # Just mark streaming as stopped and clean up - self.ui_manager.stop_streaming_response() + # Streaming response - final display already handled by streaming_handler + # Just clean up + # NOTE: Don't call stop_streaming_response() here - it was already called + # by streaming_handler.stream_response() # Clear streaming handler reference if hasattr(self.ui_manager, "streaming_handler"): self.ui_manager.streaming_handler = None - # Add to conversation history - self.context.conversation_history.append( - Message(role=MessageRole.ASSISTANT, content=response_content) - ) + # Check for unused tool results (dataflow hygiene warning) + # NOTE: Disabled for cleaner demo output - models often compute + # analytically without referencing tool results explicitly + unused_warning = 
self._tool_state.format_unused_warning() + if unused_warning: + log.info("Unused tool results detected at end of turn") + # output.info(unused_warning) # Disabled - too noisy for demos + + # Extract and register any value bindings from assistant text + # This allows values like "σ_d = 5" to become referenceable via $vN + if response_content and response_content != "No response": + new_bindings = self._tool_state.extract_bindings_from_text( + response_content + ) + if new_bindings: + log.info( + f"Extracted {len(new_bindings)} value bindings from assistant response" + ) + for binding in new_bindings: + log.debug( + f" ${binding.id} = {binding.raw_value} (aliases: {binding.aliases})" + ) + + # Add to conversation history via SessionManager + # Include reasoning_content if present (for DeepSeek reasoner and similar models) + await self.context.add_assistant_message(response_content) break except asyncio.CancelledError: @@ -222,62 +496,59 @@ async def process_conversation(self, max_turns: int = 30): import traceback traceback.print_exc() - self.context.conversation_history.append( - Message( - role=MessageRole.ASSISTANT, - content=f"I encountered an error: {exc}", - ) + self.context.inject_assistant_message( + f"I encountered an error: {exc}" ) + # Stop streaming UI before breaking + if self.ui_manager.is_streaming_response: + await self.ui_manager.stop_streaming_response() + if hasattr(self.ui_manager, "streaming_handler"): + self.ui_manager.streaming_handler = None break except asyncio.CancelledError: raise - async def _handle_streaming_completion(self, tools: list | None = None) -> dict: + async def _handle_streaming_completion( + self, tools: list | None = None + ) -> CompletionResponse: """Handle streaming completion with UI integration. 
Args: tools: Tool definitions to pass to the LLM, or None to disable tools + + Returns: + CompletionResponse with streaming metadata """ from mcp_cli.chat.streaming_handler import StreamingResponseHandler # Signal UI that streaming is starting - self.ui_manager.start_streaming_response() + await self.ui_manager.start_streaming_response() # Set the streaming handler reference in UI manager for interruption support streaming_handler = StreamingResponseHandler( - console=self.ui_manager.console, chat_display=self.ui_manager.display + display=self.ui_manager.display, runtime_config=self.runtime_config ) self.ui_manager.streaming_handler = streaming_handler try: - completion = await streaming_handler.stream_response( + # stream_response returns dict, convert to CompletionResponse + completion_dict = await streaming_handler.stream_response( client=self.context.client, messages=[msg.to_dict() for msg in self.context.conversation_history], tools=tools, ) + # Convert dict to CompletionResponse Pydantic model + completion = CompletionResponse.from_dict(completion_dict) + # Enhanced tool call validation and logging - if completion.get("tool_calls"): + if completion.tool_calls: log.debug( - f"Streaming completion returned {len(completion['tool_calls'])} tool calls" + f"Streaming completion returned {len(completion.tool_calls)} tool calls" ) - for i, tc in enumerate(completion["tool_calls"]): + for i, tc in enumerate(completion.tool_calls): log.debug(f"Streamed tool call {i}: {tc}") - # Validate tool call structure - if not self._validate_streaming_tool_call(tc): - log.warning(f"Invalid tool call structure from streaming: {tc}") - # Try to fix common issues - fixed_tc = self._fix_tool_call_structure(tc) - if fixed_tc: - completion["tool_calls"][i] = fixed_tc - log.debug(f"Fixed tool call {i}: {fixed_tc}") - else: - log.error( - f"Could not fix tool call {i}, removing from list" - ) - completion["tool_calls"].pop(i) - return completion finally: @@ -285,11 +556,16 @@ async def 
_handle_streaming_completion(self, tools: list | None = None) -> dict: # Will be cleared after finalization in main conversation loop pass - async def _handle_regular_completion(self, tools: list | None = None) -> dict: + async def _handle_regular_completion( + self, tools: list | None = None + ) -> CompletionResponse: """Handle regular (non-streaming) completion. Args: tools: Tool definitions to pass to the LLM, or None to disable tools + + Returns: + CompletionResponse with timing metadata """ start_time = time.time() @@ -297,7 +573,7 @@ async def _handle_regular_completion(self, tools: list | None = None) -> dict: messages_as_dicts = [ msg.to_dict() for msg in self.context.conversation_history ] - completion = await self.context.client.create_completion( + completion_dict = await self.context.client.create_completion( messages=messages_as_dicts, tools=tools, ) @@ -312,104 +588,20 @@ async def _handle_regular_completion(self, tools: list | None = None) -> dict: messages_as_dicts = [ msg.to_dict() for msg in self.context.conversation_history ] - completion = await self.context.client.create_completion( + completion_dict = await self.context.client.create_completion( messages=messages_as_dicts ) else: raise elapsed = time.time() - start_time - completion["elapsed_time"] = elapsed - completion["streaming"] = False - - result: dict = completion - return result - - def _validate_streaming_tool_call(self, tool_call: dict) -> bool: - """Validate that a tool call from streaming has the required structure.""" - try: - if not isinstance(tool_call, dict): - return False # type: ignore[unreachable] - - # Check for required fields - if "function" not in tool_call: - return False - - function = tool_call["function"] - if not isinstance(function, dict): - return False - - # Check function has name - if "name" not in function or not function["name"]: - return False - - # Validate arguments if present - if "arguments" in function: - args = function["arguments"] - if 
isinstance(args, str): - # Try to parse as JSON - try: - if args.strip(): # Don't try to parse empty strings - import json - - json.loads(args) - except json.JSONDecodeError: - log.warning(f"Invalid JSON arguments in tool call: {args}") - return False - elif not isinstance(args, dict): - # Arguments should be string or dict - return False - - return True - - except Exception as e: - log.error(f"Error validating streaming tool call: {e}") - return False - - def _fix_tool_call_structure(self, tool_call: dict) -> dict | None: - """Try to fix common issues with tool call structure from streaming.""" - try: - fixed = dict(tool_call) # Make a copy - - # Ensure we have required fields - if "id" not in fixed: - fixed["id"] = f"call_{hash(str(tool_call)) % 10000}" - - if "type" not in fixed: - fixed["type"] = "function" - - if "function" not in fixed: - return None # Can't fix this - function = fixed["function"] + # Add timing and streaming metadata to the dict before converting to Pydantic + completion_dict["elapsed_time"] = elapsed + completion_dict["streaming"] = False - # Fix empty name - if not function.get("name"): - return None # Can't fix missing name - - # Fix arguments - if "arguments" not in function: - function["arguments"] = "{}" - elif function["arguments"] is None: - function["arguments"] = "{}" - elif isinstance(function["arguments"], dict): - # Convert dict to JSON string - import json - - function["arguments"] = json.dumps(function["arguments"]) - elif not isinstance(function["arguments"], str): - # Convert to string - function["arguments"] = str(function["arguments"]) - - # Validate the fixed version - if self._validate_streaming_tool_call(fixed): - return fixed - else: - return None - - except Exception as e: - log.error(f"Error fixing tool call structure: {e}") - return None + # Convert to CompletionResponse Pydantic model + return CompletionResponse.from_dict(completion_dict) async def _load_tools(self): """ @@ -420,7 +612,9 @@ async def 
_load_tools(self): try: if hasattr(self.context.tool_manager, "get_adapted_tools_for_llm"): # EXPLICITLY specify provider for proper adaptation - provider = getattr(self.context, "provider", "openai") + from mcp_cli.config.defaults import DEFAULT_PROVIDER + + provider = getattr(self.context, "provider", DEFAULT_PROVIDER) tools_and_mapping = ( await self.context.tool_manager.get_adapted_tools_for_llm(provider) ) @@ -437,3 +631,31 @@ async def _load_tools(self): log.error(f"Error loading tools: {exc}") self.context.openai_tools = [] self.context.tool_name_mapping = {} + + def _register_user_literals_from_history(self) -> int: + """Extract and register numeric literals from recent user messages. + + Scans conversation history for the most recent user message(s) and + registers any numeric literals found. This whitelists user-provided + numbers so they pass ungrounded call detection. + + Returns: + Number of literals registered + """ + total_registered = 0 + + # Scan recent messages for user content + for msg in reversed(self.context.conversation_history): + if msg.role == MessageRole.USER and msg.content: + count = self._tool_state.register_user_literals(msg.content) + total_registered += count + log.debug(f"Registered {count} user literals from message") + # Only process the most recent user message + break + + if total_registered > 0: + log.info( + f"Registered {total_registered} user literals for ungrounded check whitelist" + ) + + return total_registered diff --git a/src/mcp_cli/chat/models.py b/src/mcp_cli/chat/models.py index 1a82d3b0..85000179 100644 --- a/src/mcp_cli/chat/models.py +++ b/src/mcp_cli/chat/models.py @@ -1,12 +1,15 @@ -"""Chat-specific Pydantic models.""" +"""Chat-specific Pydantic models and protocols.""" from __future__ import annotations import json from enum import Enum -from typing import Any +from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable from pydantic import BaseModel, Field +if TYPE_CHECKING: + from 
mcp_cli.tools.manager import ToolManager + class MessageRole(str, Enum): """Message roles in conversation.""" @@ -17,6 +20,28 @@ class MessageRole(str, Enum): TOOL = "tool" +class MessageField(str, Enum): + """Message field names for API serialization.""" + + ROLE = "role" + CONTENT = "content" + NAME = "name" + TOOL_CALLS = "tool_calls" + TOOL_CALL_ID = "tool_call_id" + REASONING_CONTENT = "reasoning_content" + + +class ToolCallField(str, Enum): + """Tool call field names for API serialization.""" + + ID = "id" + TYPE = "type" + FUNCTION = "function" + INDEX = "index" + NAME = "name" + ARGUMENTS = "arguments" + + class FunctionCall(BaseModel): """Function call within a tool call (OpenAI format).""" @@ -45,17 +70,18 @@ class ToolCallData(BaseModel): id: str = Field(description="Tool call ID") type: str = Field(default="function", description="Type of tool call") function: FunctionCall = Field(description="Function call data") + index: int = Field(default=0, description="Tool call index in batch") model_config = {"frozen": False} def to_dict(self) -> dict[str, Any]: """Convert to dict for API.""" return { - "id": self.id, - "type": self.type, - "function": { - "name": self.function.name, - "arguments": self.function.arguments, + ToolCallField.ID: self.id, + ToolCallField.TYPE: self.type, + ToolCallField.FUNCTION: { + ToolCallField.NAME: self.function.name, + ToolCallField.ARGUMENTS: self.function.arguments, }, } @@ -63,14 +89,31 @@ def to_dict(self) -> dict[str, Any]: def from_dict(cls, data: dict[str, Any]) -> "ToolCallData": """Create from dict.""" return cls( - id=data["id"], - type=data.get("type", "function"), + id=data.get(ToolCallField.ID, ""), + type=data.get(ToolCallField.TYPE, "function"), + index=data.get(ToolCallField.INDEX, 0), function=FunctionCall( - name=data["function"]["name"], - arguments=data["function"]["arguments"], + name=data.get(ToolCallField.FUNCTION, {}).get(ToolCallField.NAME, ""), + arguments=data.get(ToolCallField.FUNCTION, {}).get( + 
ToolCallField.ARGUMENTS, "" + ), ), ) + def merge_chunk(self, chunk: "ToolCallData") -> None: + """Merge data from a streaming chunk into this tool call. + + Args: + chunk: New chunk data to merge + """ + # Update function name if provided + if chunk.function.name: + self.function.name = chunk.function.name + + # Accumulate arguments (concatenate JSON strings) + if chunk.function.arguments: + self.function.arguments += chunk.function.arguments + class Message(BaseModel): """A message in the conversation history.""" @@ -86,12 +129,36 @@ class Message(BaseModel): tool_call_id: str | None = Field( default=None, description="Tool call ID (for tool response messages)" ) + reasoning_content: str | None = Field( + default=None, + description="Reasoning content (for models like DeepSeek reasoner)", + ) model_config = {"frozen": False} def to_dict(self) -> dict[str, Any]: - """Convert to dict for LLM API calls.""" - return self.model_dump(exclude_none=True, mode="json") # type: ignore[no-any-return] + """Convert to dict for LLM API calls. + + Handles provider-specific requirements: + - OpenAI: Requires 'content' field in assistant messages with tool_calls + - DeepSeek Reasoner: Requires 'reasoning_content' field when model provided it + """ + result = self.model_dump(exclude_none=True, mode="json") + + # CRITICAL FIX: OpenAI (especially newer models like gpt-5-mini) requires + # the 'content' field to be present in assistant messages with tool_calls, + # even if it's null. Without this, some models may hang or reject the request. + if self.role == MessageRole.ASSISTANT and MessageField.TOOL_CALLS in result: + if MessageField.CONTENT not in result: + result[MessageField.CONTENT] = None + + # NOTE: reasoning_content is automatically included if set (not None) + # because we're not explicitly excluding it. The exclude_none=True will + # only exclude it if it's None. 
This is correct behavior per DeepSeek docs: + # https://api-docs.deepseek.com/guides/thinking_mode#tool-calls + # "the user needs to send the reasoning content back to the API" + + return result # type: ignore[no-any-return] @classmethod def from_dict(cls, data: dict[str, Any]) -> Message: @@ -188,3 +255,80 @@ class ChatStatus(BaseModel): def to_dict(self) -> dict[str, Any]: """Convert to dict.""" return self.model_dump(mode="json") # type: ignore[no-any-return] + + +# ────────────────────────────────────────────────────────────────────────────── +# Protocols - formalize interfaces for type safety +# ────────────────────────────────────────────────────────────────────────────── + + +@runtime_checkable +class ToolProcessorContext(Protocol): + """Protocol for context objects used by ToolProcessor. + + Formalizes the interface instead of using dynamic getattr/setattr. + This ensures type safety and makes dependencies explicit. + """ + + # Required attributes + tool_manager: "ToolManager" + conversation_history: list[Message] + + # Optional processor back-reference (set by ToolProcessor) + tool_processor: Any # Will be set to ToolProcessor instance + + def get_display_name_for_tool(self, tool_name: str) -> str: + """Get display name for a tool (may be namespaced).""" + ... + + def inject_tool_message(self, message: Message) -> None: + """Add a message directly to conversation history.""" + ... + + +@runtime_checkable +class UIManagerProtocol(Protocol): + """Protocol for UI managers used by ToolProcessor. + + Defines the minimal interface required by ToolProcessor. + The actual ChatUIManager has many more methods, but these are + the core ones used during tool execution. + + Note: Uses Any return types where the implementation varies. 
+ """ + + # Core attributes + interrupt_requested: bool + verbose_mode: bool + console: Any # Rich Console instance + + def print_tool_call(self, tool_name: str, arguments: dict[str, Any]) -> None: + """Print tool call info to console.""" + ... + + def do_confirm_tool_execution( + self, + tool_name: str, + arguments: dict[str, Any], + ) -> bool: + """Ask user to confirm tool execution. + + Returns True if user confirms, False otherwise. + """ + ... + + async def start_tool_execution( + self, tool_name: str, arguments: dict[str, Any] + ) -> None: + """Signal start of tool execution for UI updates.""" + ... + + async def finish_tool_execution( + self, result: str | None = None, success: bool = True + ) -> None: + """Signal end of tool execution for UI updates.""" + ... + + def finish_tool_calls(self) -> None: + """Clean up after all tool calls complete.""" + ... diff --git a/src/mcp_cli/chat/response_models.py b/src/mcp_cli/chat/response_models.py new file mode 100644 index 00000000..6c114ae9 --- /dev/null +++ b/src/mcp_cli/chat/response_models.py @@ -0,0 +1,169 @@ +"""Clean Pydantic models for LLM responses - no dictionary goop! + +All LLM response handling should use these models instead of raw dicts. + +IMPORTANT: We import Message, ToolCall, FunctionCall from chuk_llm.core.models +to avoid duplicating model definitions. These are the canonical types used +across chuk-ai ecosystem. 
+""" + +from __future__ import annotations + +from enum import Enum +from typing import Any + +from pydantic import BaseModel, Field + +# Import canonical models from chuk_llm instead of redefining +from chuk_llm.core.models import ( + FunctionCall, + Message, + MessageRole, + ToolCall, +) + +# Re-export for backwards compatibility +__all__ = [ + "FunctionCall", + "Message", + "MessageRole", + "MessageField", + "ToolCall", + "CompletionResponse", + "convert_messages_to_models", + "convert_messages_to_dicts", +] + + +# ================================================================ +# Message Field Names (for serialization) +# ================================================================ + + +class MessageField(str, Enum): + """Message field names for serialization.""" + + ROLE = "role" + CONTENT = "content" + TOOL_CALLS = "tool_calls" + TOOL_CALL_ID = "tool_call_id" + NAME = "name" + + +# ================================================================ +# Completion Response Models +# ================================================================ + + +class CompletionResponse(BaseModel): + """LLM completion response with streaming metadata - type-safe! + + This extends the basic chuk_llm CompletionResponse with MCP-CLI-specific + streaming metadata like chunks_received, elapsed_time, etc. + + Use this instead of raw dicts for all completion handling. + No more completion.get('response', 'No response')! 
+ """ + + response: str = Field(default="", description="Text response from LLM") + tool_calls: list[ToolCall] = Field( + default_factory=list, description="Tool calls requested by LLM" + ) + reasoning_content: str | None = Field( + default=None, description="Reasoning content (if available)" + ) + # MCP-CLI-specific streaming metadata + chunks_received: int = Field(default=0, description="Number of chunks received") + elapsed_time: float = Field(default=0.0, description="Response time in seconds") + interrupted: bool = Field(default=False, description="Was streaming interrupted") + streaming: bool = Field(default=False, description="Was this a streaming response") + + model_config = {"frozen": True} + + @classmethod + def from_dict(cls, data: dict[str, Any]) -> CompletionResponse: + """Create from dictionary (for legacy compatibility). + + Args: + data: Raw completion dictionary + + Returns: + CompletionResponse instance + """ + # Parse tool calls if present - handle both dict and ToolCall instances + tool_calls = [] + if "tool_calls" in data and data["tool_calls"]: + for tc in data["tool_calls"]: + if isinstance(tc, ToolCall): + tool_calls.append(tc) + elif isinstance(tc, dict): + # Create ToolCall from dict using chuk_llm's model_validate + tool_calls.append(ToolCall.model_validate(tc)) + + return cls( + response=data.get("response", ""), + tool_calls=tool_calls, + reasoning_content=data.get("reasoning_content"), + chunks_received=data.get("chunks_received", 0), + elapsed_time=data.get("elapsed_time", 0.0), + interrupted=data.get("interrupted", False), + streaming=data.get("streaming", False), + ) + + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary (for backwards compatibility).""" + return { + "response": self.response, + "tool_calls": [tc.to_dict() for tc in self.tool_calls], + "reasoning_content": self.reasoning_content, + "chunks_received": self.chunks_received, + "elapsed_time": self.elapsed_time, + "interrupted": self.interrupted, + 
"streaming": self.streaming, + } + + @property + def has_tool_calls(self) -> bool: + """Check if response includes tool calls.""" + return len(self.tool_calls) > 0 + + @property + def has_content(self) -> bool: + """Check if response has text content.""" + return bool(self.response) + + +# ================================================================ +# Helper Functions +# ================================================================ + + +def convert_messages_to_models(messages: list[dict[str, Any]]) -> list[Message]: + """Convert list of message dicts to Message models. + + Uses chuk_llm's Message.model_validate for proper Pydantic parsing. + + Args: + messages: List of raw message dictionaries + + Returns: + List of Message instances + """ + return [ + Message.model_validate(msg) if isinstance(msg, dict) else msg + for msg in messages + ] + + +def convert_messages_to_dicts(messages: list[Message]) -> list[dict[str, Any]]: + """Convert list of Message models to dicts. + + Uses chuk_llm's Message.to_dict() for proper serialization. + + Args: + messages: List of Message instances + + Returns: + List of message dictionaries + """ + return [msg.to_dict() for msg in messages] diff --git a/src/mcp_cli/chat/streaming_handler.py b/src/mcp_cli/chat/streaming_handler.py index ae73c44e..2fe62b71 100644 --- a/src/mcp_cli/chat/streaming_handler.py +++ b/src/mcp_cli/chat/streaming_handler.py @@ -1,10 +1,10 @@ -# mcp_cli/chat/streaming_handler.py - COMPLETE FINAL VERSION -""" -Enhanced streaming response handler for MCP CLI chat interface. -Handles async chunk yielding from chuk-llm with live UI updates and better integration. -Now includes proper tool call extraction from streaming chunks. +"""Refactored streaming response handler using unified display system. -FINAL FIX: Proper parameter accumulation across multiple streaming chunks. +This is a clean, async-native implementation using: +- Pydantic models for all state (zero dictionaries!) 
+- StreamingDisplayManager for UI (chuk-term only) +- No fallback display paths +- Type-safe throughout """ from __future__ import annotations @@ -12,758 +12,439 @@ import asyncio import json import time +from enum import Enum from typing import Any -from chuk_term.ui import output -from mcp_cli.ui.streaming_display import StreamingContext +from pydantic import BaseModel, Field -from mcp_cli.logging_config import get_logger +from mcp_cli.display import StreamingDisplayManager +from mcp_cli.chat.models import ToolCallData +from mcp_cli.config.logging import get_logger +from mcp_cli.config import RuntimeConfig, TimeoutType, load_runtime_config logger = get_logger("streaming") -class StreamingResponseHandler: - """Enhanced streaming handler with better UI integration and error handling.""" - - def __init__(self, console: Any | None = None, chat_display=None): - self.console = console # For compatibility - self.chat_display = chat_display # Centralized display manager - self.current_response = "" - self.live_display: Any | None = None - self.streaming_context: StreamingContext | None = None - self.start_time = 0.0 - self.chunk_count = 0 - self.is_streaming = False - self._response_complete = False - self._interrupted = False +class StreamingResponseField(str, Enum): + """Field names for streaming response serialization.""" + + RESPONSE = "response" + TOOL_CALLS = "tool_calls" + CHUNKS_RECEIVED = "chunks_received" + ELAPSED_TIME = "elapsed_time" + STREAMING = "streaming" + INTERRUPTED = "interrupted" + REASONING_CONTENT = "reasoning_content" + + +class StreamingResponse(BaseModel): + """Container for streaming response data. + + Pydantic model for type-safe streaming responses. 
+ """ + + content: str = Field(description="Response content") + tool_calls: list[dict[str, Any]] = Field( + default_factory=list, description="Tool calls from the model" + ) + chunks_received: int = Field(default=0, description="Number of chunks processed") + elapsed_time: float = Field(description="Time taken for the response") + interrupted: bool = Field( + default=False, description="Whether streaming was interrupted" + ) + reasoning_content: str | None = Field( + default=None, description="Reasoning content (for DeepSeek reasoner)" + ) + streaming: bool = Field( + default=True, description="Whether this was a streaming response" + ) + + model_config = {"frozen": False} + + def to_dict(self) -> dict[str, Any]: + """Convert to dict for backwards compatibility using enums.""" + return { + StreamingResponseField.RESPONSE: self.content, + StreamingResponseField.TOOL_CALLS: self.tool_calls, + StreamingResponseField.CHUNKS_RECEIVED: self.chunks_received, + StreamingResponseField.ELAPSED_TIME: self.elapsed_time, + StreamingResponseField.STREAMING: self.streaming, + StreamingResponseField.INTERRUPTED: self.interrupted, + StreamingResponseField.REASONING_CONTENT: self.reasoning_content, + } - # Tool call tracking for streaming - self._accumulated_tool_calls: list[dict[str, Any]] = [] - self._current_tool_call: dict[str, Any] | None = None - # Track previous response to detect accumulated vs delta - self._previous_response_field = "" +class ToolCallAccumulator: + """Manages accumulation of tool calls across streaming chunks. - async def stream_response( - self, - client, - messages: list[dict[str, Any]], - tools: list[dict[str, Any | None]] | None = None, - **kwargs, - ) -> dict[str, Any]: - """ - Stream response from LLM with live UI updates and enhanced error handling. + Uses Pydantic models for type-safe state management. + Handles fragmented JSON and validates tool call structure. 
+ """ - Args: - client: LLM client with streaming support - messages: Conversation messages - tools: Available tools for function calling - **kwargs: Additional arguments for completion + def __init__(self) -> None: + self._accumulated: list[ToolCallData] = [] - Returns: - Complete response dictionary + def process_chunk_tool_calls(self, chunk_tool_calls: list[dict[str, Any]]) -> None: + """Process tool calls from a chunk and accumulate them. + + Args: + chunk_tool_calls: Tool calls extracted from current chunk """ - self.current_response = "" - self.chunk_count = 0 - self.start_time = time.time() - self.is_streaming = True - self._response_complete = False - self._interrupted = False - self._accumulated_tool_calls = [] - self._current_tool_call = None - self._previous_response_field = "" # Reset for new streaming session + for tc_dict in chunk_tool_calls: + # Convert dict to Pydantic model + tc = ToolCallData.from_dict(tc_dict) - try: - # Check if client supports streaming via create_completion with stream=True - if hasattr(client, "create_completion"): - return await self._handle_chuk_llm_streaming( - client, messages, tools, **kwargs - ) - else: - # Client doesn't support completion, fallback - logger.debug( - "Client doesn't support create_completion, falling back to regular completion" - ) - return await self._handle_regular_completion( - client, messages, tools, **kwargs - ) + # Find or create accumulator for this tool call + existing = self._find_accumulated_call(tc.id, tc.index) - finally: - self.is_streaming = False - # Ensure streaming is properly finalized for both display systems - if not self._response_complete: - self._show_final_response() - self.live_display = None + if existing: + # Merge chunk data using intelligent JSON merging + self._merge_tool_call(existing, tc) + else: + # New tool call - add to accumulated list + self._accumulated.append(tc) + + def _find_accumulated_call(self, tc_id: str, tc_index: int) -> ToolCallData | None: + """Find 
existing accumulated tool call by ID or index.""" + for tc in self._accumulated: + if tc.id == tc_id or tc.index == tc_index: + return tc + return None - def interrupt_streaming(self): - """Interrupt the current streaming operation.""" - self._interrupted = True - logger.debug("Streaming interrupted by user") + def _merge_tool_call(self, existing: ToolCallData, new: ToolCallData) -> None: + """Merge new tool call data into existing accumulator.""" + # Update function name if provided + if new.function.name: + existing.function.name = new.function.name - def _show_final_response(self): - """Display the final complete response with enhanced formatting.""" - if self._response_complete or not self.current_response: - return - - # Calculate elapsed time - elapsed = time.time() - self.start_time if self.start_time else 0 - - # Finalize display - if self.chat_display: - self.chat_display.finish_streaming() - logger.debug("Finalized centralized display") - elif self.streaming_context: - # Fallback to streaming context finalization - logger.debug( - f"Finalizing streaming context with {len(self.current_response)} characters" + # Accumulate arguments using intelligent JSON merging + if new.function.arguments: + merged_args = self._merge_json_strings( + existing.function.arguments, new.function.arguments ) - try: - self.streaming_context.__exit__(None, None, None) - except Exception as e: - logger.debug(f"Error finalizing streaming context: {e}") - self.streaming_context = None - else: - # No streaming display, use regular output - output.assistant_message(self.current_response, elapsed=elapsed) - - self._response_complete = True - - async def _handle_chuk_llm_streaming( - self, - client, - messages: list[dict[str, Any]], - tools: list[dict[str, Any | None]] | None = None, - **kwargs, - ) -> dict[str, Any]: - """Handle chuk-llm's streaming with proper tool call accumulation and timeout protection.""" - tool_calls: list[dict[str, Any]] = [] + existing.function.arguments = 
merged_args - # Log conversation state for debugging - logger.debug(f"Starting streaming with {len(messages)} messages in history") - logger.debug(f"Tools available: {len(tools) if tools else 0}") - if messages: - last_msg = messages[-1] - logger.debug(f"Last message role: {last_msg.get('role')}, has content: {bool(last_msg.get('content'))}") + def _merge_json_strings(self, current: str, new: str) -> str: + """Merge two JSON strings intelligently. - # Start live display - self._start_live_display() + Tries multiple strategies: + 1. Parse both and merge dicts + 2. Concatenate and validate + 3. Fix common issues + """ + if not current: + return new + if not new: + return current + # Strategy 1: Both valid JSON objects - merge try: - # Use chuk-llm's streaming approach with timeout protection - # Wrap the entire streaming process in a timeout (120 seconds total) - async def stream_with_timeout(): - logger.debug("Creating streaming completion...") - async for chunk in client.create_completion( - messages=messages, tools=tools, stream=True, **kwargs - ): - if self._interrupted: - logger.debug("Breaking from stream due to interruption") - break - await self._process_chunk(chunk, tool_calls) - logger.debug("Streaming iteration completed normally") - - try: - logger.debug("Starting stream_with_timeout() with 120s timeout") - await asyncio.wait_for(stream_with_timeout(), timeout=120.0) - logger.debug("stream_with_timeout() completed successfully") - except asyncio.TimeoutError: - logger.error("Streaming timed out after 120 seconds") - raise RuntimeError("Streaming response timed out - the API may be experiencing issues") - - # IMPORTANT: After streaming is complete, finalize any remaining tool calls - await self._finalize_streaming_tool_calls(tool_calls) - - except asyncio.CancelledError: - logger.debug("Streaming cancelled") - self._interrupted = True - raise - except Exception as e: - logger.warning(f"Streaming error in chuk-llm streaming: {e}") - raise - finally: - # 
Ensure streaming is properly finalized for both display systems - if not self._response_complete: - self._show_final_response() - - # Build final response - elapsed = time.time() - self.start_time - result = { - "response": self.current_response, - "tool_calls": tool_calls, - "chunks_received": self.chunk_count, - "elapsed_time": elapsed, - "streaming": True, - "interrupted": self._interrupted, - } - - logger.debug(f"Streaming completed: {len(tool_calls)} tool calls extracted") - for i, tc in enumerate(tool_calls): - logger.debug( - f"Tool call {i}: {tc['function']['name']} with args: {tc['function']['arguments']}" - ) - - return result - - async def _handle_stream_completion( - self, - client, - messages: list[dict[str, Any]], - tools: list[dict[str, Any | None]] | None = None, - **kwargs, - ) -> dict[str, Any]: - """Handle alternative stream_completion method.""" - tool_calls: list[dict[str, Any]] = [] + current_obj = json.loads(current) + new_obj = json.loads(new) + if isinstance(current_obj, dict) and isinstance(new_obj, dict): + current_obj.update(new_obj) + return json.dumps(current_obj) + except json.JSONDecodeError: + pass - # Start live display - self._start_live_display() + # Strategy 2: Concatenate + combined = current + new + # Try validating try: - async for chunk in client.stream_completion( - messages=messages, tools=tools, **kwargs - ): - if self._interrupted: - logger.debug("Breaking from stream due to interruption") - break + json.loads(combined) + return combined + except json.JSONDecodeError: + pass - await self._process_chunk(chunk, tool_calls) + # Strategy 3: Fix common issues + # Try adding missing braces + if not combined.startswith("{"): + combined = "{" + combined + if not combined.endswith("}"): + combined = combined + "}" - except asyncio.CancelledError: - logger.debug("Streaming cancelled") - self._interrupted = True - raise - except Exception as e: - logger.warning(f"Streaming error in stream_completion: {e}") - raise + # Fix 
duplicated braces: }{ -> },{ + combined = combined.replace("}{", "},{") - # Build final response - elapsed = time.time() - self.start_time - return { - "response": self.current_response, - "tool_calls": tool_calls, - "chunks_received": self.chunk_count, - "elapsed_time": elapsed, - "streaming": True, - "interrupted": self._interrupted, - } + try: + json.loads(combined) + return combined + except json.JSONDecodeError: + # Give up, return concatenated + logger.warning(f"Could not merge JSON: {current[:50]}... + {new[:50]}...") + return current + new - async def _handle_regular_completion( - self, - client, - messages: list[dict[str, Any]], - tools: list[dict[str, Any | None]] | None = None, - **kwargs, - ) -> dict[str, Any]: - """Fallback to regular non-streaming completion.""" - logger.debug("Using non-streaming completion") + def finalize(self) -> list[dict[str, Any]]: + """Finalize and validate all accumulated tool calls. - # Show a simple loading indicator - with output.loading("Generating response..."): - result = await client.create_completion( - messages=messages, tools=tools, **kwargs - ) + Returns only valid, complete tool calls as dicts for API compatibility. 
+ """ + finalized = [] - return { - "response": result.get("response", ""), - "tool_calls": result.get("tool_calls", []), - "chunks_received": 1, - "elapsed_time": time.time() - self.start_time, - "streaming": False, - "interrupted": False, - } + for tc in self._accumulated: + # Must have name + if not tc.function.name: + logger.debug(f"Skipping tool call with no name: {tc.id}") + continue - def _start_live_display(self): - """Start the live display for streaming updates.""" - self.start_time = time.time() - - # Use centralized display if available, otherwise fallback - if self.chat_display: - self.chat_display.start_streaming() - logger.debug("Started streaming with centralized display") - elif not self.streaming_context: - # Fallback to original StreamingContext - logger.debug(f"Console type: {type(self.console)}") - if hasattr(self.console, "width"): - logger.debug(f"Console width: {self.console.width}") - if hasattr(self.console, "size"): - logger.debug(f"Console size: {self.console.size}") - - # Create StreamingContext with the new compact display - self.streaming_context = StreamingContext( - console=self.console, - title="🤖 Assistant", - mode="response", - refresh_per_second=8, # Moderate refresh rate for stability - transient=True, # Will clear when done - ) - # Enter the context manager - self.streaming_context.__enter__() + # Validate arguments JSON + args = tc.function.arguments + if not args or args.strip() == "{}": + args = "{}" + else: + try: + json.loads(args) + except json.JSONDecodeError: + logger.warning( + f"Invalid JSON in tool call arguments, skipping: {args[:100]}" + ) + continue - self.live_display = True + # Convert Pydantic model to dict for API + cleaned = tc.to_dict() + finalized.append(cleaned) - async def _process_chunk( - self, chunk: dict[str, Any], tool_calls: list[dict[str, Any]] - ): - """Process a single streaming chunk with enhanced error handling and tool call support.""" - self.chunk_count += 1 + logger.debug(f"Finalized tool 
call: {tc.function.name}") - try: - # Log chunk for debugging - logger.debug(f"Processing chunk {self.chunk_count}: {chunk}") - - # Extract content from chunk - content = self._extract_chunk_content(chunk) - if content: - logger.debug( - f"Extracted streaming content: {repr(content[:50])}{'...' if len(content) > 50 else ''}" - ) - self.current_response += content - logger.debug(f"Total response length now: {len(self.current_response)}") - - # Handle tool calls in chunks - tool_call_data = self._extract_tool_calls_from_chunk(chunk) - if tool_call_data: - logger.debug(f"Extracted tool call data: {tool_call_data}") - self._process_tool_call_chunk(tool_call_data, tool_calls) - - # Update display with streaming content - if not self._interrupted and content: - if self.chat_display: - self.chat_display.update_streaming(content) - logger.debug( - f"Updated centralized display with: {repr(content[:50])}" - ) - elif self.streaming_context: - logger.debug( - f"Updating streaming context with content: {repr(content[:50])}" - ) - self.streaming_context.update(content) - logger.debug( - f"StreamingContext content length: {len(self.streaming_context.content)}" - ) + return finalized - # Minimal delay for smooth streaming - await asyncio.sleep(0.0005) # 0.5ms for very smooth streaming - except Exception as e: - logger.warning(f"Error processing chunk: {e}") - # Continue processing other chunks +class StreamingResponseHandler: + """Clean, async-native streaming handler. - def _extract_chunk_content(self, chunk: dict[str, Any]) -> str: - """Extract text content from a chuk-llm streaming chunk.""" - try: - # chuk-llm streaming format - chunk has "response" field with content - if isinstance(chunk, dict): - # Primary format for chuk-llm - if "response" in chunk: - response_field = ( - str(chunk["response"]) if chunk["response"] is not None else "" - ) + Uses unified display system - no fallbacks, no dual paths. 
+ """ - # CRITICAL: Detect if response is accumulated or delta - # If the response field starts with what we had before, it's accumulated - if ( - response_field.startswith(self._previous_response_field) - and self._previous_response_field - ): - # It's accumulated! Extract only the new part - delta = response_field[len(self._previous_response_field) :] - self._previous_response_field = response_field - logger.debug( - f"Detected accumulated response, extracted delta: {repr(delta[:50])}" - ) - return delta - else: - # Might be a delta or first chunk - self._previous_response_field = response_field - return response_field - - # Alternative formats (for compatibility) - elif "content" in chunk: - return str(chunk["content"]) - elif "text" in chunk: - return str(chunk["text"]) - elif "delta" in chunk and isinstance(chunk["delta"], dict): - delta_content = chunk["delta"].get("content") - return str(delta_content) if delta_content is not None else "" - elif "choices" in chunk and chunk["choices"]: - choice = chunk["choices"][0] - if "delta" in choice and "content" in choice["delta"]: - delta_content = choice["delta"]["content"] - return str(delta_content) if delta_content is not None else "" - elif isinstance(chunk, str): # type: ignore[unreachable] - return chunk + def __init__( + self, + display: StreamingDisplayManager, + runtime_config: RuntimeConfig | None = None, + ): + """Initialize handler. 
- except Exception as e: - logger.debug(f"Error extracting content from chunk: {e}") + Args: + display: The unified display manager (required, no fallback) + runtime_config: Runtime configuration (optional, will load defaults if not provided) + """ + self.display = display + self.tool_accumulator = ToolCallAccumulator() + self._interrupted = False + self.runtime_config = runtime_config or load_runtime_config() - return "" + async def stream_response( + self, + client, + messages: list[dict[str, Any]], + tools: list[dict[str, Any]] | None = None, + **kwargs, + ) -> dict[str, Any]: + """Stream response from LLM client. - def _extract_tool_calls_from_chunk( - self, chunk: dict[str, Any] - ) -> dict[str, Any] | None: - """Extract tool call data from a streaming chunk.""" - try: - if isinstance(chunk, dict): - # Direct tool_calls field - if "tool_calls" in chunk: - logger.debug( - f"Found direct tool_calls in chunk: {chunk['tool_calls']}" - ) - result: dict[str, Any | None] = chunk["tool_calls"] - return result - - # OpenAI-style delta format - if "choices" in chunk and chunk["choices"]: - choice = chunk["choices"][0] - if "delta" in choice: - delta = choice["delta"] - if "tool_calls" in delta: - logger.debug( - f"Found tool_calls in delta: {delta['tool_calls']}" - ) - delta_result: dict[str, Any | None] = delta["tool_calls"] - return delta_result - # Sometimes tool_calls come in function_call format - if "function_call" in delta: - logger.debug( - f"Found function_call in delta: {delta['function_call']}" - ) - return {"function_call": delta["function_call"]} - - # Alternative formats - if "function_call" in chunk: - logger.debug( - f"Found direct function_call: {chunk['function_call']}" - ) - return {"function_call": chunk["function_call"]} + Args: + client: LLM client with streaming support + messages: Conversation messages + tools: Available tools for function calling + **kwargs: Additional arguments for client - except Exception as e: - logger.debug(f"Error 
extracting tool calls from chunk: {e}") + Returns: + Response dictionary (for backwards compatibility) + """ + # Reset state + self._interrupted = False + self.tool_accumulator = ToolCallAccumulator() - return None + # Start display + await self.display.start_streaming() + start_time = time.time() - def _process_tool_call_chunk( - self, tool_call_data: dict[str, Any], tool_calls: list[dict[str, Any]] - ): - """Process tool call chunk data and accumulate complete tool calls.""" try: - if isinstance(tool_call_data, list): # type: ignore[unreachable] - # Array of tool calls - for tc_item in tool_call_data: # type: ignore[unreachable] - self._accumulate_tool_call(tc_item, tool_calls) - elif isinstance(tool_call_data, dict): - # Single tool call or function call - if "function_call" in tool_call_data: - # Legacy function_call format - convert to tool_calls format - fc = tool_call_data["function_call"] - converted = { - "id": f"call_{len(self._accumulated_tool_calls)}", - "type": "function", - "function": fc, - } - self._accumulate_tool_call(converted, tool_calls) - else: - # Direct tool call - self._accumulate_tool_call(tool_call_data, tool_calls) - - except Exception as e: - logger.warning(f"Error processing tool call chunk: {e}") + # Check client capabilities + if not hasattr(client, "create_completion"): + logger.warning("Client doesn't support create_completion") + return await self._handle_non_streaming( + client, messages, tools, **kwargs + ) - def _accumulate_tool_call( - self, tool_call_item: dict[str, Any], tool_calls: list[dict[str, Any]] - ): - """ - Accumulate streaming tool call data into complete tool calls. + # Stream with chunk timeout protection + await self._stream_with_timeout(client, messages, tools, **kwargs) - FINAL FIX: Proper accumulation that waits for complete parameters. 
- """ - try: - tc_id = tool_call_item.get("id") - tc_index = tool_call_item.get("index", 0) + # Finalize tool calls + tool_calls = self.tool_accumulator.finalize() - logger.debug( - f"Accumulating tool call: id={tc_id}, index={tc_index}, item={tool_call_item}" + # Capture state values BEFORE stop_streaming clears them + chunks_received = ( + self.display.streaming_state.chunks_received + if self.display.streaming_state + else 0 + ) + reasoning_content = ( + self.display.streaming_state.reasoning_content + if self.display.streaming_state + else None ) - # Find existing tool call or create new one - existing_tc = None - for tc in self._accumulated_tool_calls: - if tc.get("id") == tc_id or ( - tc_id is None and tc.get("index") == tc_index - ): - existing_tc = tc - break + # Stop display and get final content (this clears streaming_state!) + final_content = await self.display.stop_streaming( + interrupted=self._interrupted + ) - if existing_tc is None: - # Create new tool call - existing_tc = { - "id": tc_id or f"call_{len(self._accumulated_tool_calls)}", - "type": "function", - "function": {"name": "", "arguments": ""}, - "index": tc_index, - "_streaming_state": { - "chunks_received": 0, - "name_complete": False, - "args_started": False, - "args_complete": False, - }, - } - self._accumulated_tool_calls.append(existing_tc) - logger.debug(f"Created new tool call: {existing_tc}") - - # Update streaming state - existing_tc["_streaming_state"]["chunks_received"] += 1 - - if "type" in tool_call_item: - existing_tc["type"] = tool_call_item["type"] - - if "function" in tool_call_item: - func_data = tool_call_item["function"] - existing_func = existing_tc["function"] - - # Accumulate function name - if "name" in func_data and func_data["name"] is not None: - new_name = str(func_data["name"]) - if new_name and not existing_func["name"]: - existing_func["name"] += new_name - existing_tc["_streaming_state"]["name_complete"] = True - logger.debug(f"Accumulated name: 
'{existing_func['name']}'") - - # CRITICAL: Accumulate arguments properly - if "arguments" in func_data: - new_args = func_data["arguments"] - current_args = existing_func["arguments"] - - logger.debug( - f"Accumulating arguments: current='{current_args}', new='{new_args}'" - ) + # Build response using captured values + elapsed = time.time() - start_time + response = StreamingResponse( + content=final_content, + tool_calls=tool_calls, + chunks_received=chunks_received, + elapsed_time=elapsed, + interrupted=self._interrupted, + reasoning_content=reasoning_content, + ) - if new_args is not None: - existing_tc["_streaming_state"]["args_started"] = True - - if isinstance(new_args, dict): - # Complete arguments object received - existing_func["arguments"] = json.dumps(new_args) - existing_tc["_streaming_state"]["args_complete"] = True - logger.debug( - f"Complete arguments dict received for {existing_func['name']}" - ) - elif isinstance(new_args, str): - if new_args.strip(): - # Non-empty string - accumulate - if not current_args: - existing_func["arguments"] = new_args - else: - merged = self._merge_argument_strings( - current_args, new_args - ) - existing_func["arguments"] = merged - - # Check if we now have complete JSON - if self._is_complete_json(existing_func["arguments"]): - existing_tc["_streaming_state"]["args_complete"] = ( - True - ) - logger.debug( - f"Arguments appear complete for {existing_func['name']}" - ) - # Empty string might indicate completion - elif existing_tc["_streaming_state"]["args_started"]: - existing_tc["_streaming_state"]["args_complete"] = True - logger.debug( - f"Empty args received - marking complete for {existing_func['name']}" - ) - else: - # Other type - convert to string - existing_func["arguments"] += str(new_args) - - logger.debug( - f"Final accumulated arguments: '{existing_func['arguments']}'" - ) + logger.info( + f"Streaming complete: {len(final_content)} chars, " + f"{len(tool_calls)} tools, {elapsed:.2f}s" + ) - # Check if 
this tool call is ready to be finalized - # Don't finalize during streaming - wait for end + return response.to_dict() except Exception as e: - logger.warning(f"Error accumulating tool call: {e}") - import traceback + logger.error(f"Streaming error: {e}", exc_info=True) + await self.display.stop_streaming(interrupted=True) + raise - traceback.print_exc() + async def _stream_with_timeout( + self, + client, + messages: list[dict[str, Any]], + tools: list[dict[str, Any]] | None, + **kwargs, + ) -> None: + """Stream with per-chunk timeout protection. + + Timeouts are configurable via (in priority order): + 1. CLI arguments (--tool-timeout) + 2. Environment variables (MCP_STREAMING_CHUNK_TIMEOUT, etc.) + 3. Config file (server_config.json -> timeouts.streamingChunkTimeout) + 4. Defaults (45s chunk, 300s global) + """ + # Get timeouts from runtime config (type-safe with enums!) + chunk_timeout = self.runtime_config.get_timeout(TimeoutType.STREAMING_CHUNK) + global_timeout = self.runtime_config.get_timeout(TimeoutType.STREAMING_GLOBAL) + + logger.debug( + f"Streaming timeouts: chunk={chunk_timeout}s, global={global_timeout}s" + ) + + async def stream_chunks(): + """Inner streaming function.""" + stream = client.create_completion( + messages=messages, + tools=tools, + stream=True, + **kwargs, + ) - def _is_complete_json(self, json_str: str) -> bool: - """Check if a string contains complete, valid JSON.""" - try: - if not json_str or not json_str.strip(): - return False - parsed = json.loads(json_str) - return isinstance(parsed, dict) # We expect objects for tool arguments - except json.JSONDecodeError: - return False + stream_iter = stream.__aiter__() - def _merge_argument_strings(self, current: str, new: str) -> str: - """Intelligently merge argument strings from streaming chunks.""" - try: - # If both are empty, return empty - if not current.strip() and not new.strip(): - return "" - - # If one is empty, return the other - if not current.strip(): - return new - if not 
new.strip(): - return current - - # Try to parse both as JSON first - try: - current_json = json.loads(current) - new_json = json.loads(new) - - # Both are valid JSON - merge them - if isinstance(current_json, dict) and isinstance(new_json, dict): - current_json.update(new_json) - return json.dumps(current_json) - else: - # One is not a dict - just use the newer one - return new - - except json.JSONDecodeError: - # At least one is not valid JSON - try concatenation - combined = current + new - - # Test if concatenation creates valid JSON + while True: try: - json.loads(combined) - return combined - except json.JSONDecodeError: - # Concatenation didn't work - try with fixes - return self._fix_concatenated_json(combined) - - except Exception as e: - logger.warning(f"Error merging argument strings: {e}") - # Fallback - just concatenate - return current + new - - def _fix_concatenated_json(self, json_str: str) -> str: - """Attempt to fix concatenated JSON strings from streaming.""" - try: - # Common fixes - fixed = json_str + # Wait for next chunk with timeout + chunk = await asyncio.wait_for( + stream_iter.__anext__(), + timeout=chunk_timeout, + ) - # Fix missing opening brace - if not fixed.strip().startswith("{") and ":" in fixed: - fixed = "{" + fixed + # Check for interrupt + if self._interrupted: + logger.debug("Stream interrupted by user") + try: + await stream_iter.aclose() + except Exception as e: + logger.debug(f"Error closing stream: {e}") + break - # Fix missing closing brace - if not fixed.strip().endswith("}") and ":" in fixed: - fixed = fixed + "}" + # Process chunk + await self._process_chunk(chunk) - # Fix concatenated objects: }{"key": "value"} -> },{"key": "value"} - fixed = fixed.replace("}{", "},{") + # Small yield for smooth UI + await asyncio.sleep(0.0005) - # Try to parse the fixed version - json.loads(fixed) - return fixed + except StopAsyncIteration: + logger.debug("Stream completed normally") + break + except asyncio.TimeoutError: + 
logger.warning(f"Chunk timeout after {chunk_timeout}s") + # Display user-friendly error message + from chuk_term.ui import output + + output.error( + f"\n⏱️ Streaming timeout after {chunk_timeout:.0f}s waiting for response.\n" + f"The model may be taking longer than expected to respond.\n" + f"You can increase this timeout with: --tool-timeout {chunk_timeout * 2:.0f}\n" + f"Or set in config file: timeouts.streamingChunkTimeout = {chunk_timeout * 2:.0f}" + ) + break - except json.JSONDecodeError: - # Still invalid - return as-is and let validation handle it - logger.debug(f"Could not fix concatenated JSON: {json_str}") - return json_str + # Run with global timeout + try: + await asyncio.wait_for(stream_chunks(), timeout=global_timeout) + except asyncio.TimeoutError: + logger.error(f"Global streaming timeout after {global_timeout}s") + # Display user-friendly error message + from chuk_term.ui import output + + output.error( + f"\n⏱️ Global streaming timeout after {global_timeout:.0f}s.\n" + f"The total streaming time exceeded the maximum allowed.\n" + f"You can increase this timeout with: --tool-timeout {global_timeout * 2:.0f}\n" + f"Or set MCP_STREAMING_GLOBAL_TIMEOUT={global_timeout * 2:.0f}" + ) + self._interrupted = True - async def _finalize_streaming_tool_calls(self, tool_calls: list[dict[str, Any]]): - """ - Finalize accumulated tool calls after streaming is complete. + async def _process_chunk(self, raw_chunk: dict[str, Any]) -> None: + """Process a single streaming chunk. - This is where we decide which tool calls are complete and ready to execute. 
+ Args: + raw_chunk: Raw chunk from LLM provider """ - logger.debug("Finalizing streaming tool calls after completion") - - for tc in self._accumulated_tool_calls: - func = tc.get("function", {}) - name = func.get("name", "") - args = func.get("arguments", "") - state = tc.get("_streaming_state", {}) - - logger.debug(f"Finalizing tool call: {name}") - logger.debug(f" State: {state}") - logger.debug(f" Args: '{args}'") + # Use display to process chunk (normalizes format) + await self.display.add_chunk(raw_chunk) - # Skip if already added - if any(existing_tc.get("id") == tc.get("id") for existing_tc in tool_calls): - continue - - # Must have a name - if not name: - logger.debug("Skipping tool call without name") - continue + # Extract tool calls if present + if "tool_calls" in raw_chunk and raw_chunk["tool_calls"]: + self.tool_accumulator.process_chunk_tool_calls(raw_chunk["tool_calls"]) - # Generic parameter validation - no hard-coded tool names - if not args or args.strip() == "": - # No arguments provided - func["arguments"] = "{}" - logger.debug(f"Finalizing tool with empty args: {name}") + async def _handle_non_streaming( + self, + client, + messages: list[dict[str, Any]], + tools: list[dict[str, Any]] | None, + **kwargs, + ) -> dict[str, Any]: + """Fallback for non-streaming clients.""" + from chuk_term.ui import output - elif args.strip() == "{}": - # Empty JSON object - this could be valid for some tools - logger.debug(f"Finalizing tool with empty object: {name}") + start_time = time.time() + with output.loading("Generating response..."): + # Try to call client + if hasattr(client, "complete"): + result = await client.complete(messages=messages, tools=tools, **kwargs) else: - # Has some arguments - validate they're proper JSON - try: - parsed = json.loads(args) - if isinstance(parsed, dict): - logger.debug(f"Finalizing tool with valid args: {name}") - else: - logger.warning( - f"Tool {name} has non-object arguments: {type(parsed)}" - ) - # Still allow it - 
some tools might accept non-object args - except json.JSONDecodeError: - logger.warning(f"Tool {name} has invalid JSON arguments: {args}") - # Try to fix it or skip - fixed_args = self._fix_concatenated_json(args) - try: - json.loads(fixed_args) - func["arguments"] = fixed_args - logger.debug(f"Fixed JSON for tool: {name}") - except json.JSONDecodeError: - logger.warning(f"Cannot fix JSON for tool {name}, skipping") - continue - - # Clean up and add to final list - final_tc = self._clean_tool_call_for_final_list(tc) - tool_calls.append(final_tc) - logger.info(f"✅ Finalized tool call: {final_tc['function']['name']}") - - def _clean_tool_call_for_final_list( - self, tool_call: dict[str, Any] - ) -> dict[str, Any]: - """Clean up tool call for final list by removing internal tracking fields.""" - cleaned = dict(tool_call) - - # Remove streaming state - if "_streaming_state" in cleaned: - del cleaned["_streaming_state"] - - # Ensure proper structure - if "function" in cleaned and "arguments" in cleaned["function"]: - args = cleaned["function"]["arguments"] - if isinstance(args, str): - # Ensure it's valid JSON - try: - parsed = json.loads(args) - cleaned["function"]["arguments"] = json.dumps(parsed) - except json.JSONDecodeError: - # Invalid JSON - use empty object - cleaned["function"]["arguments"] = "{}" - elif isinstance(args, dict): - cleaned["function"]["arguments"] = json.dumps(args) - else: - cleaned["function"]["arguments"] = "{}" + raise RuntimeError("Client has no streaming or completion method") - return cleaned + elapsed = time.time() - start_time - def _create_display_content(self): - """Create display status (not used currently without Live display).""" - # This method is kept for compatibility but not actively used - # without rich.Live support in chuk-term - return None + return { + "response": result.get("response", ""), + "tool_calls": result.get("tool_calls", []), + "chunks_received": 1, + "elapsed_time": elapsed, + "streaming": False, + 
"interrupted": False, + } + + def interrupt_streaming(self) -> None: + """Interrupt current streaming operation.""" + self._interrupted = True + logger.debug("Streaming interrupted by user") diff --git a/src/mcp_cli/chat/system_prompt.py b/src/mcp_cli/chat/system_prompt.py index acabd2b2..fc1d6916 100644 --- a/src/mcp_cli/chat/system_prompt.py +++ b/src/mcp_cli/chat/system_prompt.py @@ -1,16 +1,28 @@ # mcp_cli/chat/system_prompt.py +import os -# llm imports -from mcp_cli.llm.system_prompt_generator import SystemPromptGenerator +def generate_system_prompt(tools=None): + """Generate a concise system prompt for the assistant. -def generate_system_prompt(tools): - """Generate a concise system prompt for the assistant.""" - prompt_generator = SystemPromptGenerator() - tools_json = {"tools": tools} + Note: Tool definitions are passed via the API's tools parameter, + so we don't duplicate them in the system prompt. - system_prompt = prompt_generator.generate_prompt(tools_json) - system_prompt += """ + When dynamic tools mode is enabled (MCP_CLI_DYNAMIC_TOOLS=1), generates + a special prompt explaining the tool discovery workflow. + """ + # Check if dynamic tools mode is enabled + dynamic_mode = os.environ.get("MCP_CLI_DYNAMIC_TOOLS") == "1" + + if dynamic_mode: + return _generate_dynamic_tools_prompt(tools) + + # Count tools for the prompt (tools may be ToolInfo objects or dicts) + tool_count = len(tools) if tools else 0 + + system_prompt = f"""You are an intelligent assistant with access to {tool_count} tools to help solve user queries effectively. + +Use the available tools when appropriate to accomplish tasks. Tools are provided via the API and you can call them as needed. **GENERAL GUIDELINES:** @@ -63,3 +75,74 @@ def generate_system_prompt(tools): - Assume basic user intentions, such as fetching top results by a common metric. """ return system_prompt + + +def _generate_dynamic_tools_prompt(tools=None): + """Generate system prompt for dynamic tools mode. 
+ + In dynamic tools mode, the LLM has access to a tool discovery system + instead of individual tools directly. This prompt explains the workflow. + + Args: + tools: The actual underlying tools (used to inform the model about + what kinds of tools are available) + """ + # Count actual tools to give context + tool_count = len(tools) if tools else 0 + + system_prompt = f"""You are an intelligent assistant with access to a TOOL DISCOVERY SYSTEM. + +**IMPORTANT: HOW TO USE TOOLS** + +You have access to {tool_count} tools through a discovery system. You MUST use the discovery tools to find and execute them: + +1. **search_tools** - Search for tools by name, description, or capability + - Use this FIRST when the user asks for something (e.g., search for "time", "weather", "calculate") + - Returns matching tools with names and descriptions + +2. **list_tools** - List all available tools + - Use when you want to see everything available + - Good for exploring capabilities + +3. **get_tool_schema** - Get detailed parameters for a specific tool + - Use AFTER finding a tool with search_tools/list_tools + - Shows required parameters and their types + +4. **call_tool** - Execute a discovered tool + - Use AFTER getting the schema + - Pass tool_name plus the tool's parameters + +**WORKFLOW EXAMPLE:** + +User: "What time is it in London?" + +Step 1: Search for relevant tools +→ Call search_tools with query="time" or query="clock" + +Step 2: Get the tool schema +→ Call get_tool_schema with tool_name from search results + +Step 3: Execute the tool +→ Call call_tool with tool_name and required parameters + +**CRITICAL RULES:** +- ALWAYS use search_tools or list_tools first to discover available tools +- NEVER assume a tool exists without checking +- ALWAYS get the schema before calling a tool +- If search returns no results, try different keywords or list all tools + +**GENERAL GUIDELINES:** + +1. 
Step-by-step reasoning: + - Analyze tasks systematically + - Search for relevant tools before attempting to help + - Verify tool capabilities match user needs + +2. Clear communication: + - Explain what tools you're searching for and why + - Share what you discovered + - If no suitable tool exists, tell the user + +REMEMBER: You CANNOT directly call tools like "get_time" or "weather" - you MUST discover them first using search_tools, then execute them using call_tool. +""" + return system_prompt diff --git a/src/mcp_cli/chat/testing.py b/src/mcp_cli/chat/testing.py new file mode 100644 index 00000000..ad5c4e2a --- /dev/null +++ b/src/mcp_cli/chat/testing.py @@ -0,0 +1,117 @@ +# mcp_cli/chat/testing.py +"""Testing utilities for chat module. + +This module contains test helpers that are used by both production test-mode +code paths and unit tests. These are separated from the main chat_context.py +to keep production code clean. +""" + +from __future__ import annotations + +import logging +from typing import Any + +from mcp_cli.chat.chat_context import ChatContext +from mcp_cli.model_management import ModelManager + +logger = logging.getLogger(__name__) + + +class TestChatContext(ChatContext): + """ + Test-specific ChatContext that works with stream_manager instead of ToolManager. + + This is used for: + 1. The --test-mode flag in chat handler + 2. 
Unit tests that need a mock context + """ + + def __init__(self, stream_manager: Any, model_manager: ModelManager): + """Create test context with stream_manager.""" + # Initialize base attributes without calling super().__init__ + self.tool_manager = None # type: ignore[assignment] # Tests don't use ToolManager + self.stream_manager = stream_manager + self.model_manager = model_manager + + # Conversation state + self.exit_requested = False + self.conversation_history: list = [] + self.tool_history: list = [] + + # ToolProcessor back-reference + self.tool_processor: Any = None + + # Tool state + self.tools: list = [] + self.internal_tools: list = [] + self.server_info: list = [] + self.tool_to_server_map: dict = {} + self.openai_tools: list = [] + self.tool_name_mapping: dict = {} + + logger.debug(f"TestChatContext created with {self.provider}/{self.model}") + + @classmethod + def create_for_testing( + cls, + stream_manager: Any, + provider: str | None = None, + model: str | None = None, + ) -> "TestChatContext": + """Factory for test contexts.""" + model_manager = ModelManager() + + if provider and model: + model_manager.switch_model(provider, model) + elif provider: + model_manager.switch_provider(provider) + elif model: + # Switch model in current provider + current_provider = model_manager.get_active_provider() + model_manager.switch_model(current_provider, model) + + return cls(stream_manager, model_manager) + + async def _initialize_tools(self) -> None: + """Test-specific tool initialization.""" + # Get tools from stream_manager + if hasattr(self.stream_manager, "get_internal_tools"): + self.tools = list(self.stream_manager.get_internal_tools()) + else: + self.tools = list(self.stream_manager.get_all_tools()) + + # Get server info + self.server_info = list(self.stream_manager.get_server_info()) + + # Build mappings - tools are ToolInfo objects + self.tool_to_server_map = { + t.name: self.stream_manager.get_server_for_tool(t.name) for t in self.tools + } + + # 
Convert tools to OpenAI format for tests + self.openai_tools = [ + { + "type": "function", + "function": { + "name": t.name, + "description": t.description or "", + "parameters": t.parameters or {}, + }, + } + for t in self.tools + ] + self.tool_name_mapping = {} + + # Copy for system prompt + self.internal_tools = list(self.tools) + + async def execute_tool(self, tool_name: str, arguments: dict[str, Any]) -> Any: + """Execute tool via stream_manager.""" + if hasattr(self.stream_manager, "call_tool"): + return await self.stream_manager.call_tool(tool_name, arguments) + else: + raise ValueError("Stream manager doesn't support tool execution") + + async def get_server_for_tool(self, tool_name: str) -> str: + """Get server for tool from stream_manager.""" + return self.stream_manager.get_server_for_tool(tool_name) or "Unknown" diff --git a/src/mcp_cli/chat/tool_processor.py b/src/mcp_cli/chat/tool_processor.py index 4a2c7fdc..5483aa6c 100644 --- a/src/mcp_cli/chat/tool_processor.py +++ b/src/mcp_cli/chat/tool_processor.py @@ -2,8 +2,11 @@ """ mcp_cli.chat.tool_processor -Clean tool processor that only uses the working tool_manager execution path. -Removed the problematic stream_manager path that was causing "unhealthy connection" errors. +Simplified tool processor that delegates parallel execution to ToolManager. +Handles CLI-specific concerns: UI, conversation history, user confirmation. + +Uses chuk-tool-processor's ToolCall/ToolResult models via ToolManager. +Uses Protocol-based interfaces for type safety. 
""" from __future__ import annotations @@ -11,14 +14,23 @@ import asyncio import json import logging -from typing import Any +from typing import TYPE_CHECKING, Any from chuk_term.ui import output - -from mcp_cli.chat.models import Message, MessageRole -from mcp_cli.ui.formatting import display_tool_call_result +from chuk_tool_processor import ToolCall as CTPToolCall +from chuk_tool_processor import ToolResult as CTPToolResult + +from mcp_cli.chat.response_models import Message, MessageRole, ToolCall +from mcp_cli.chat.models import ToolProcessorContext, UIManagerProtocol +from chuk_ai_session_manager.guards import get_tool_state, SoftBlockReason +from mcp_cli.display import display_tool_call_result +from chuk_tool_processor.discovery import get_search_engine +from mcp_cli.llm.content_models import ContentBlockType from mcp_cli.utils.preferences import get_preference_manager +if TYPE_CHECKING: + from mcp_cli.tools.manager import ToolManager + log = logging.getLogger(__name__) @@ -26,35 +38,51 @@ class ToolProcessor: """ Handle execution of tool calls returned by the LLM. - CLEAN: Only uses tool_manager.execute_tool() which works correctly. + Delegates parallel execution to ToolManager.stream_execute_tools(), + handling only CLI-specific concerns: UI, conversation history, confirmation. + + Uses ToolProcessorContext protocol for type-safe context access. 
""" - def __init__(self, context, ui_manager, *, max_concurrency: int = 4) -> None: + def __init__( + self, + context: ToolProcessorContext, + ui_manager: UIManagerProtocol, + *, + max_concurrency: int = 4, + ) -> None: self.context = context self.ui_manager = ui_manager + self.max_concurrency = max_concurrency - # Tool manager for execution - self.tool_manager = getattr(context, "tool_manager", None) - - self._sem = asyncio.Semaphore(max_concurrency) - self._pending: list[asyncio.Task] = [] + # Tool manager for execution - access via protocol attribute + self.tool_manager: ToolManager | None = context.tool_manager # Track transport failures for recovery detection self._transport_failures = 0 self._consecutive_transport_failures = 0 - # Give the UI a back-pointer for Ctrl-C cancellation - setattr(self.context, "tool_processor", self) + # Track state for callbacks + self._call_metadata: dict[str, dict[str, Any]] = {} + self._cancelled = False + + # Give the context a back-pointer for Ctrl-C cancellation + # Note: This is the one place we set an attribute on context + context.tool_processor = self async def process_tool_calls( - self, tool_calls: list[Any], name_mapping: dict[str, str] | None = None + self, + tool_calls: list[Any], + name_mapping: dict[str, str] | None = None, + reasoning_content: str | None = None, ) -> None: """ - Execute tool_calls concurrently using the working tool_manager path. + Execute tool_calls in parallel using ToolManager.stream_execute_tools(). 
Args: tool_calls: List of tool call objects from the LLM name_mapping: Mapping from LLM tool names to actual tool names + reasoning_content: Optional reasoning content from the LLM """ if not tool_calls: output.warning("Empty tool_calls list received.") @@ -67,208 +95,479 @@ async def process_tool_calls( f"Processing {len(tool_calls)} tool calls with {len(name_mapping)} name mappings" ) + # Reset state + self._call_metadata.clear() + self._cancelled = False + + # Add assistant message with all tool calls BEFORE executing + self._add_assistant_message_with_tool_calls(tool_calls, reasoning_content) + + # Convert LLM tool calls to CTP format and check confirmations + ctp_calls: list[CTPToolCall] = [] + for idx, call in enumerate(tool_calls): if getattr(self.ui_manager, "interrupt_requested", False): + self._cancelled = True break - task = asyncio.create_task(self._run_single_call(idx, call, name_mapping)) - self._pending.append(task) + # Extract tool call details + llm_tool_name, raw_arguments, call_id = self._extract_tool_call_info( + call, idx + ) + + # Map to execution name + execution_tool_name = name_mapping.get(llm_tool_name, llm_tool_name) + + # Get display name - special handling for dynamic tool call_tool + display_name = execution_tool_name + display_arguments = raw_arguments + + # For dynamic tools, extract the actual tool name from call_tool + if execution_tool_name == "call_tool": + # Parse arguments to get the real tool name + parsed_args = self._parse_arguments(raw_arguments) + if "tool_name" in parsed_args: + actual_tool = parsed_args["tool_name"] + # Show as "call_tool → actual_tool_name" + display_name = f"call_tool → {actual_tool}" + # Filter out tool_name from displayed args to reduce noise + display_arguments = { + k: v for k, v in parsed_args.items() if k != "tool_name" + } + + if hasattr(self.context, "get_display_name_for_tool"): + # Only apply name mapping if not already a dynamic tool + if not execution_tool_name.startswith("call_tool"): + 
display_name = self.context.get_display_name_for_tool( + execution_tool_name + ) + + # Show tool call in UI + try: + self.ui_manager.print_tool_call(display_name, display_arguments) + except Exception as ui_exc: + log.warning(f"UI display error (non-fatal): {ui_exc}") + + # Handle user confirmation + if self._should_confirm_tool(execution_tool_name): + confirmed = self.ui_manager.do_confirm_tool_execution( + tool_name=display_name, arguments=raw_arguments + ) + if not confirmed: + setattr(self.ui_manager, "interrupt_requested", True) + self._add_cancelled_tool_to_history( + llm_tool_name, call_id, raw_arguments + ) + self._cancelled = True + break + + # Parse arguments + arguments = self._parse_arguments(raw_arguments) + + # DEBUG: Log exactly what the model sent for this tool call + log.info(f"TOOL CALL FROM MODEL: {llm_tool_name} id={call_id}") + log.info(f" raw_arguments: {raw_arguments}") + log.info(f" parsed_arguments: {arguments}") + + # Get actual tool name for checks (for call_tool, it's the inner tool) + actual_tool_for_checks = execution_tool_name + if execution_tool_name == "call_tool" and "tool_name" in arguments: + actual_tool_for_checks = arguments["tool_name"] + + # GENERIC VALIDATION: Reject tool calls with None arguments + # This catches cases where the model emits placeholders or incomplete calls + none_args = [ + k for k, v in arguments.items() if v is None and k != "tool_name" + ] + if none_args: + error_msg = ( + f"INVALID_ARGS: Tool '{actual_tool_for_checks}' called with None values " + f"for: {', '.join(none_args)}. Please provide actual values." 
+ ) + log.warning(error_msg) + output.warning(f"⚠ {error_msg}") + self._add_tool_result_to_history( + llm_tool_name, + call_id, + f"**Error**: {error_msg}\n\nPlease retry with actual parameter values.", + ) + continue + + # Check $vN references in arguments (dataflow validation) + tool_state = get_tool_state() + ref_check = tool_state.check_references(arguments) + if not ref_check.valid: + log.warning( + f"Missing references in {actual_tool_for_checks}: {ref_check.message}" + ) + output.warning(f"⚠ {ref_check.message}") + # Add error to history instead of executing + self._add_tool_result_to_history( + llm_tool_name, + call_id, + f"**Blocked**: {ref_check.message}\n\n" + f"{tool_state.format_bindings_for_model()}", + ) + continue + + # Check for ungrounded calls (numeric args without $vN refs) + # Skip discovery tools - they don't need grounded numeric inputs + # Skip idempotent math tools - they should be allowed to compute with any literals + # Use SoftBlock repair system: attempt rebind → symbolic fallback → ask user + is_math_tool = tool_state.is_idempotent_math_tool(actual_tool_for_checks) + if ( + not tool_state.is_discovery_tool(execution_tool_name) + and not is_math_tool + ): + ungrounded_check = tool_state.check_ungrounded_call( + actual_tool_for_checks, arguments + ) + if ungrounded_check.is_ungrounded: + # Log args for observability (important for debugging) + log.info( + f"Ungrounded call to {actual_tool_for_checks} with args: {arguments}" + ) + + # Check if this tool should have auto-rebound applied + # Parameterized tools (normal_cdf, sqrt, etc.) 
should NOT be rebound + # because each call with different args has different semantics + if not tool_state.should_auto_rebound(actual_tool_for_checks): + # For parameterized tools, check preconditions first + # This blocks premature calls before any values are computed + precond_ok, precond_error = tool_state.check_tool_preconditions( + actual_tool_for_checks, arguments + ) + if not precond_ok: + log.warning( + f"Precondition failed for {actual_tool_for_checks}" + ) + output.warning( + f"⚠ Precondition failed for {actual_tool_for_checks}" + ) + self._add_tool_result_to_history( + llm_tool_name, call_id, f"**Blocked**: {precond_error}" + ) + continue + + # Preconditions met - log and allow execution + display_args = { + k: v for k, v in arguments.items() if k != "tool_name" + } + log.info( + f"Allowing parameterized tool {actual_tool_for_checks} with args: {display_args}" + ) + output.info(f"→ {actual_tool_for_checks} args: {display_args}") + # Fall through to execution + else: + # For other tools, try to repair using SoftBlock system + should_proceed, repaired_args, fallback_response = ( + tool_state.try_soft_block_repair( + actual_tool_for_checks, + arguments, + SoftBlockReason.UNGROUNDED_ARGS, + ) + ) + + if should_proceed and repaired_args: + # Rebind succeeded - use repaired arguments + log.info( + f"Auto-repaired ungrounded call to {actual_tool_for_checks}: " + f"{arguments} -> {repaired_args}" + ) + output.info( + f"↻ Auto-rebound arguments for {actual_tool_for_checks}" + ) + arguments = repaired_args + elif fallback_response: + # Symbolic fallback - return helpful response instead of blocking + # Show visible annotation for observability + log.info(f"Symbolic fallback for {actual_tool_for_checks}") + output.info( + f"⏸ [analysis] required_input_missing for {actual_tool_for_checks}" + ) + self._add_tool_result_to_history( + llm_tool_name, call_id, fallback_response + ) + continue + else: + # All repairs failed - add error to history + log.warning( + f"Could 
not repair ungrounded call to {actual_tool_for_checks}" + ) + self._add_tool_result_to_history( + llm_tool_name, + call_id, + f"Cannot proceed with `{actual_tool_for_checks}`: " + f"arguments require computed values.\n\n" + f"{tool_state.format_bindings_for_model()}", + ) + continue + + # Check per-tool call limit using the guard (handles exemptions for math/discovery) + # per_tool_cap=0 means "disabled/unlimited" (see RuntimeLimits presets) + per_tool_result = tool_state.check_per_tool_limit(actual_tool_for_checks) + if tool_state.limits.per_tool_cap > 0 and per_tool_result.blocked: + log.warning(f"Tool {actual_tool_for_checks} blocked by per-tool limit") + output.warning( + f"⚠ Tool {actual_tool_for_checks} - {per_tool_result.reason}" + ) + self._add_tool_result_to_history( + llm_tool_name, + call_id, + per_tool_result.reason or "Per-tool limit reached", + ) + continue + + # Resolve $vN references in arguments (substitute actual values) + resolved_arguments = tool_state.resolve_references(arguments) + + # Store metadata for callbacks + self._call_metadata[call_id] = { + "llm_tool_name": llm_tool_name, + "execution_tool_name": execution_tool_name, + "display_name": display_name, + "arguments": resolved_arguments, # Use resolved arguments + "raw_arguments": raw_arguments, + } + + # Create CTP ToolCall with resolved arguments + ctp_calls.append( + CTPToolCall( + id=call_id, + tool=execution_tool_name, + arguments=resolved_arguments, + ) + ) + + if self._cancelled or not ctp_calls: + await self._finish_tool_calls() + return + + if self.tool_manager is None: + raise RuntimeError("No tool manager available for tool execution") + + # Execute tools in parallel using ToolManager's streaming API try: - await asyncio.gather(*self._pending) + async for result in self.tool_manager.stream_execute_tools( + calls=ctp_calls, + on_tool_start=self._on_tool_start, + max_concurrency=self.max_concurrency, + ): + await self._on_tool_result(result) + if self._cancelled: + break # type: 
ignore[unreachable] except asyncio.CancelledError: pass - finally: - self._pending.clear() - # Signal UI that tool calls are complete - if hasattr(self.ui_manager, "finish_tool_calls") and callable( - self.ui_manager.finish_tool_calls - ): - try: - if asyncio.iscoroutinefunction(self.ui_manager.finish_tool_calls): - await self.ui_manager.finish_tool_calls() - else: - self.ui_manager.finish_tool_calls() - except Exception: - log.debug("finish_tool_calls() raised", exc_info=True) + await self._finish_tool_calls() def cancel_running_tasks(self) -> None: - """Cancel all running tool tasks.""" - for task in list(self._pending): - if not task.done(): - task.cancel() - - async def _run_single_call( - self, idx: int, tool_call: Any, name_mapping: dict[str, str] - ) -> None: + """Cancel running tool execution.""" + self._cancelled = True + + async def _on_tool_start(self, call: CTPToolCall) -> None: + """Callback when a tool starts execution.""" + metadata = self._call_metadata.get(call.id, {}) + display_name = metadata.get("display_name", call.tool) + arguments = metadata.get("arguments", call.arguments) + + # For dynamic tools, enhance the display + if call.tool == "call_tool" and "tool_name" in arguments: + actual_tool = arguments["tool_name"] + display_name = f"{actual_tool}" # Just show the actual tool name + # Show only the tool's arguments, not tool_name + arguments = {k: v for k, v in arguments.items() if k != "tool_name"} + + log.info(f"Executing tool: {call.tool} with args: {arguments}") + await self.ui_manager.start_tool_execution(display_name, arguments) + + async def _on_tool_result(self, result: CTPToolResult) -> None: + """Callback when a tool completes. + + ENHANCED: Now includes value binding system for dataflow tracking. + - Binds numeric results to $vN identifiers + - Tracks per-tool call counts for anti-thrash + - Caches results for state tracking """ - Execute one tool call using the clean tool_manager path. 
- """ - async with self._sem: - llm_tool_name = "unknown_tool" - raw_arguments: Any = {} - call_id = f"call_{idx}" - - try: - # Extract tool call details - if hasattr(tool_call, "function"): - fn = tool_call.function - llm_tool_name = getattr(fn, "name", "unknown_tool") - raw_arguments = getattr(fn, "arguments", {}) - call_id = getattr(tool_call, "id", call_id) - elif isinstance(tool_call, dict) and "function" in tool_call: - fn = tool_call["function"] - llm_tool_name = fn.get("name", "unknown_tool") - raw_arguments = fn.get("arguments", {}) - call_id = tool_call.get("id", call_id) - else: - log.error(f"Unrecognized tool call format: {type(tool_call)}") - raise ValueError( - f"Unrecognized tool call format: {type(tool_call)}" - ) + metadata = self._call_metadata.get(result.id, {}) + llm_tool_name = metadata.get("llm_tool_name", result.tool) + execution_tool_name = metadata.get("execution_tool_name", result.tool) + arguments = metadata.get("arguments", {}) + + # For dynamic tools, extract the actual tool name for better logging/caching + actual_tool_name = execution_tool_name + actual_arguments = arguments + if execution_tool_name == "call_tool" and "tool_name" in arguments: + actual_tool_name = arguments["tool_name"] + actual_arguments = {k: v for k, v in arguments.items() if k != "tool_name"} + + success = result.is_success + log.info( + f"Tool result ({actual_tool_name}): success={success}, error='{result.error}'" + ) - # Validate tool name - if not llm_tool_name or llm_tool_name == "unknown_tool": - log.error( - f"Tool name is empty or unknown in tool call: {tool_call}" - ) - llm_tool_name = f"unknown_tool_{idx}" + tool_state = get_tool_state() + value_binding = None - if not isinstance(llm_tool_name, str): - log.error(f"Tool name is not a string: {llm_tool_name}") # type: ignore[unreachable] - llm_tool_name = f"unknown_tool_{idx}" + # Cache successful results and create value bindings + if success and result.result is not None: + # Extract the actual value from 
MCP response structure + actual_result = self._extract_result_value(result.result) - # Map LLM tool name to execution tool name - execution_tool_name = name_mapping.get(llm_tool_name, llm_tool_name) + # Cache result for dedup + tool_state.cache_result(actual_tool_name, actual_arguments, actual_result) + log.debug(f"Cached result for {actual_tool_name}: {actual_result}") + # Create value binding ($v1, $v2, etc.) for dataflow tracking + # Only bind "execution" tool results (not discovery tools) + if not tool_state.is_discovery_tool(execution_tool_name): + value_binding = tool_state.bind_value( + actual_tool_name, actual_arguments, actual_result + ) log.info( - f"Tool execution: LLM='{llm_tool_name}' -> Execution='{execution_tool_name}'" + f"Bound value ${value_binding.id} = {actual_result} from {actual_tool_name}" ) - # Get display name for UI - display_name = execution_tool_name - if hasattr(self.context, "get_display_name_for_tool"): - display_name = self.context.get_display_name_for_tool( - execution_tool_name - ) + # Record numeric results for runaway detection + if isinstance(actual_result, (int, float)): + tool_state.record_numeric_result(float(actual_result)) - # Show tool call in UI - try: - self.ui_manager.print_tool_call(display_name, raw_arguments) - except Exception as ui_exc: - log.warning(f"UI display error (non-fatal): {ui_exc}") - - # Handle user confirmation based on preferences - if self._should_confirm_tool(execution_tool_name): - # Show confirmation prompt with tool details - confirmed = self.ui_manager.do_confirm_tool_execution( - tool_name=display_name, arguments=raw_arguments - ) - if not confirmed: - setattr(self.ui_manager, "interrupt_requested", True) - self._add_cancelled_tool_to_history( - llm_tool_name, call_id, raw_arguments - ) - return + # Increment tool call counter for budget tracking (with tool name for split budgets) + tool_state.increment_tool_call(execution_tool_name) - # Parse arguments - arguments = 
self._parse_arguments(raw_arguments) + # Record tool use for session-aware search boosting + # Successful tools get boosted in future search results + search_engine = get_search_engine() + search_engine.record_tool_use(actual_tool_name, success=success) - # Execute tool using tool_manager (the working path) - if self.tool_manager is None: - raise RuntimeError("No tool manager available for tool execution") + # Track per-tool call count for anti-thrash + if not tool_state.is_discovery_tool(execution_tool_name): + per_tool_status = tool_state.track_tool_call(actual_tool_name) + if per_tool_status.requires_justification: + log.warning( + f"Tool {actual_tool_name} called {per_tool_status.call_count} times" + ) - # Skip loading indicator during streaming to avoid Rich Live display conflict - if self.ui_manager.is_streaming_response: - log.info( - f"Executing tool: {execution_tool_name} with args: {arguments}" - ) - tool_result = await self.tool_manager.execute_tool( - execution_tool_name, arguments - ) - else: - with output.loading("Executing tool…"): - log.info( - f"Executing tool: {execution_tool_name} with args: {arguments}" - ) - tool_result = await self.tool_manager.execute_tool( - execution_tool_name, arguments - ) + # For discovery tools, register any tools found in results + # Also use result shape to refine tool classification + if tool_state.is_discovery_tool(execution_tool_name): + tool_state.classify_by_result(execution_tool_name, result.result) + self._register_discovered_tools( + tool_state, execution_tool_name, result.result + ) - log.info( - f"Tool result: success={tool_result.success}, error='{tool_result.error}'" - ) + # Track transport failures + self._track_transport_failures(success, result.error) - # Track transport failures for recovery - if not tool_result.success and tool_result.error: - if "Transport not initialized" in tool_result.error or "transport" in tool_result.error.lower(): - self._transport_failures += 1 - 
self._consecutive_transport_failures += 1 + # Format content for history - include value binding info + if success: + content = self._format_tool_response(result.result) + # Append value binding info so model sees the $vN reference + if value_binding: + content = f"{content}\n\n**RESULT: ${value_binding.id} = {value_binding.typed_value}**" + else: + content = f"Error: {result.error}" - # Warn after 3 consecutive transport failures - if self._consecutive_transport_failures >= 3: - log.warning( - f"Detected {self._consecutive_transport_failures} consecutive transport failures. " - "Transport may be in a bad state." - ) - output.warning( - f"⚠️ Multiple transport errors detected ({self._consecutive_transport_failures}). " - "The connection may need to be restarted." - ) - else: - # Reset consecutive counter on non-transport errors - self._consecutive_transport_failures = 0 - else: - # Reset on success - self._consecutive_transport_failures = 0 + # Add to conversation history + self._add_tool_result_to_history(llm_tool_name, result.id, content) - # Prepare content for conversation history - if tool_result.success: - content = self._format_tool_response(tool_result.result) - else: - content = f"Error: {tool_result.error}" + # Add to tool history for /toolhistory command (use Pydantic model, not raw dict) + if hasattr(self.context, "tool_history"): + from mcp_cli.chat.models import ToolExecutionRecord - # Add to conversation history - self._add_tool_call_to_history( - llm_tool_name, call_id, arguments, content + self.context.tool_history.append( + ToolExecutionRecord( + tool_name=execution_tool_name, + arguments=arguments, + result=result.result if success else None, + error=result.error if not success else None, ) + ) - # Add to tool history (for /toolhistory command) - if hasattr(self.context, "tool_history"): - self.context.tool_history.append( - { - "tool": execution_tool_name, - "arguments": arguments, - "result": tool_result.result - if tool_result.success - else 
tool_result.error, - "success": tool_result.success, - } + # Finish UI display + await self.ui_manager.finish_tool_execution(result=content, success=success) + + # Verbose mode display + if hasattr(self.ui_manager, "verbose_mode") and self.ui_manager.verbose_mode: + # Create a compatible result object for display + from mcp_cli.tools.models import ToolCallResult + + display_result = ToolCallResult( + tool_name=result.tool, + success=success, + result=result.result if success else None, + error=result.error if not success else None, + ) + display_tool_call_result(display_result, self.ui_manager.console) + + def _track_transport_failures(self, success: bool, error: str | None) -> None: + """Track transport failures for recovery detection.""" + if not success and error: + if "Transport not initialized" in error or "transport" in error.lower(): + self._transport_failures += 1 + self._consecutive_transport_failures += 1 + + if self._consecutive_transport_failures >= 3: + log.warning( + f"Detected {self._consecutive_transport_failures} consecutive transport failures." + ) + output.warning( + f"Multiple transport errors detected ({self._consecutive_transport_failures}). " + "The connection may need to be restarted." 
) + else: + self._consecutive_transport_failures = 0 + else: + self._consecutive_transport_failures = 0 - # Finish tool execution in unified display - self.ui_manager.finish_tool_execution( - result=content, success=tool_result.success - ) + async def _finish_tool_calls(self) -> None: + """Signal UI that all tool calls are complete.""" + if hasattr(self.ui_manager, "finish_tool_calls") and callable( + self.ui_manager.finish_tool_calls + ): + try: + import asyncio - # Display result if in verbose mode - if ( - tool_result - and hasattr(self.ui_manager, "verbose_mode") - and self.ui_manager.verbose_mode - ): - display_tool_call_result(tool_result, self.ui_manager.console) - - except asyncio.CancelledError: - raise - except Exception as exc: - log.exception(f"Error executing tool call #{idx}") - - # Add error to conversation history - error_content = f"Error: Could not execute tool. {exc}" - self._add_tool_call_to_history( - llm_tool_name, call_id, raw_arguments, error_content - ) + if asyncio.iscoroutinefunction(self.ui_manager.finish_tool_calls): + await self.ui_manager.finish_tool_calls() + else: + self.ui_manager.finish_tool_calls() + except Exception: + log.debug("finish_tool_calls() raised", exc_info=True) + + def _extract_tool_call_info(self, tool_call: Any, idx: int) -> tuple[str, Any, str]: + """Extract tool name, arguments, and call ID from a tool call.""" + llm_tool_name = "unknown_tool" + raw_arguments: Any = {} + call_id = f"call_{idx}" + + if isinstance(tool_call, ToolCall): + llm_tool_name = tool_call.function.name + raw_arguments = tool_call.function.arguments + call_id = tool_call.id + # DEBUG: Log raw arguments from model + log.debug( + f"RAW MODEL TOOL CALL: {llm_tool_name}, " + f"raw_arguments type={type(raw_arguments).__name__}, " + f"value={raw_arguments}" + ) + elif isinstance(tool_call, dict) and "function" in tool_call: + log.warning( + f"Received dict tool call instead of ToolCall model: {type(tool_call)}" + ) + fn = tool_call["function"] + 
llm_tool_name = fn.get("name", "unknown_tool") + raw_arguments = fn.get("arguments", {}) + call_id = tool_call.get("id", call_id) + else: + log.error(f"Unrecognized tool call format: {type(tool_call)}") + + # Validate + if not llm_tool_name or llm_tool_name == "unknown_tool": + log.error(f"Tool name is empty or unknown in tool call: {tool_call}") + llm_tool_name = f"unknown_tool_{idx}" + + return llm_tool_name, raw_arguments, call_id def _parse_arguments(self, raw_arguments: Any) -> dict[str, Any]: """Parse raw arguments into a dictionary.""" @@ -278,9 +577,8 @@ def _parse_arguments(self, raw_arguments: Any) -> dict[str, Any]: return {} parsed: dict[str, Any] = json.loads(raw_arguments) return parsed - else: - result: dict[str, Any] = raw_arguments or {} - return result + result: dict[str, Any] = raw_arguments or {} + return result except json.JSONDecodeError as e: log.warning(f"Invalid JSON in arguments: {e}") return {} @@ -288,24 +586,160 @@ def _parse_arguments(self, raw_arguments: Any) -> dict[str, Any]: log.error(f"Error parsing arguments: {e}") return {} + def _extract_result_value(self, result: Any) -> Any: + """Extract the actual value from MCP response structures. + + MCP responses can be nested in various ways: + 1. Direct value (number, string) + 2. Dict with "content" containing MCP ToolResult with .content list + 3. Dict with "success"/"result" wrapper + 4. List of content blocks [{type: "text", text: "..."}] + 5. Object with .content attribute (MCP CallToolResult) + 6. String representation like "content=[{'type': 'text', 'text': '4.2426'}]" + + This normalizes all formats to extract the core value for binding. 
+ """ + if result is None: + return None + + # Handle string "None" (bug in some MCP responses) + if result == "None" or result == "null": + return None + + # Handle MCP CallToolResult object (has .content attribute) + if hasattr(result, "content") and isinstance(result.content, list): + return self._extract_from_content_list(result.content) + + # Handle dict structures + if isinstance(result, dict): + # Case: {"content": } + if "content" in result: + content = result["content"] + # MCP ToolResult has a .content attribute that's a list + if hasattr(content, "content"): + return self._extract_from_content_list(content.content) + # Or it might be a direct list + if isinstance(content, list): + return self._extract_from_content_list(content) + # Or a string + if isinstance(content, str): + return self._try_parse_number(content) + + # Case: {"success": true, "result": ...} + if "success" in result and "result" in result: + inner = result["result"] + # Recurse if inner is not None/string "None" + if inner is not None and inner != "None": + return self._extract_result_value(inner) + return None + + # Case: {"isError": false, "content": ...} (MCP response wrapper) + if "isError" in result: + if result.get("isError"): + return result.get("error") or result.get("content") + return self._extract_result_value(result.get("content")) + + # Case: {"text": "value"} direct + if "text" in result and isinstance(result["text"], str): + return self._try_parse_number(result["text"]) + + # Handle list of content blocks directly + if isinstance(result, list): + return self._extract_from_content_list(result) + + # Handle string that might be a serialized structure + if isinstance(result, str): + # Check for "content=[...]" string pattern (MCP SDK repr) + if result.startswith("content=["): + return self._parse_content_repr(result) + # Try to parse as number + return self._try_parse_number(result) + + # Direct numeric values + if isinstance(result, (int, float)): + return result + + return 
result + + def _extract_from_content_list(self, content_list: list) -> Any: + """Extract value from a list of MCP content blocks.""" + if not content_list: + return None + + text_parts = [] + for block in content_list: + if isinstance(block, dict): + block_type = block.get("type", "") + if block_type == ContentBlockType.TEXT.value or block_type == "text": + text = block.get("text", "") + if text: + text_parts.append(text) + # Handle TextContent objects + elif hasattr(block, "type") and hasattr(block, "text"): + if block.type == "text": + text_parts.append(block.text) + + if not text_parts: + return None + + # Join all text parts + combined = "\n".join(text_parts) if len(text_parts) > 1 else text_parts[0] + return self._try_parse_number(combined) + + def _parse_content_repr(self, repr_str: str) -> Any: + """Parse a string like "content=[{'type': 'text', 'text': '4.2426'}]".""" + import re + + # Try to extract the text value using regex + match = re.search(r"'text':\s*'([^']*)'", repr_str) + if match: + text = match.group(1) + return self._try_parse_number(text) + + # Try another pattern for double quotes + match = re.search(r'"text":\s*"([^"]*)"', repr_str) + if match: + text = match.group(1) + return self._try_parse_number(text) + + return repr_str + + def _try_parse_number(self, text: str) -> Any: + """Try to parse a string as a number, return original if not possible.""" + if not text or not isinstance(text, str): + return text + + text = text.strip() + + # Handle "None" string + if text in ("None", "null", ""): + return None + + # Try float (handles integers too) + try: + return float(text) + except (ValueError, TypeError): + pass + + return text + def _format_tool_response(self, result: Any) -> str: """Format tool response for conversation history.""" - # Handle MCP SDK ToolResult objects (nested in result dict) if isinstance(result, dict): - # Check for MCP response structure: {'isError': bool, 'content': ToolResult} - if 'content' in result and 
hasattr(result['content'], 'content'): - # Extract content array from MCP ToolResult - tool_result_content = result['content'].content + # Check for MCP response structure + if "content" in result and hasattr(result["content"], "content"): + tool_result_content = result["content"].content if isinstance(tool_result_content, list): - # Extract text from content blocks text_parts = [] for block in tool_result_content: - if isinstance(block, dict) and block.get('type') == 'text': - text_parts.append(block.get('text', '')) + if ( + isinstance(block, dict) + and block.get("type") == ContentBlockType.TEXT.value + ): + text_parts.append(block.get("text", "")) if text_parts: - return '\n'.join(text_parts) + return "\n".join(text_parts) - # Try normal JSON serialization try: return json.dumps(result, indent=2) except (TypeError, ValueError): @@ -315,50 +749,39 @@ def _format_tool_response(self, result: Any) -> str: return json.dumps(result, indent=2) except (TypeError, ValueError): return str(result) - else: - return str(result) + return str(result) - def _add_tool_call_to_history( - self, llm_tool_name: str, call_id: str, arguments: Any, content: str + def _add_assistant_message_with_tool_calls( + self, tool_calls: list[Any], reasoning_content: str | None = None ) -> None: - """Add tool call and response to conversation history.""" + """Add assistant message with all tool calls to history.""" try: - # Format arguments for history - if isinstance(arguments, dict): - arg_json = json.dumps(arguments) - else: - arg_json = str(arguments) - - # Add assistant's tool call - self.context.conversation_history.append( - Message( - role=MessageRole.ASSISTANT, - content=None, - tool_calls=[ - { - "id": call_id, - "type": "function", - "function": { - "name": llm_tool_name, - "arguments": arg_json, - }, - } - ], - ) + assistant_msg = Message( + role=MessageRole.ASSISTANT, + content=None, + tool_calls=tool_calls, + reasoning_content=reasoning_content, ) - - # Add tool's response - 
self.context.conversation_history.append( - Message( - role=MessageRole.TOOL, - name=llm_tool_name, - content=content, - tool_call_id=call_id, - ) + self.context.inject_tool_message(assistant_msg) + log.debug( + f"Added assistant message with {len(tool_calls)} tool calls to history" ) + except Exception as e: + log.error(f"Error adding assistant message to history: {e}") - log.debug(f"Added tool call to conversation history: {llm_tool_name}") - + def _add_tool_result_to_history( + self, llm_tool_name: str, call_id: str, content: str + ) -> None: + """Add tool result to conversation history.""" + try: + tool_msg = Message( + role=MessageRole.TOOL, + name=llm_tool_name, + content=content, + tool_call_id=call_id, + ) + self.context.inject_tool_message(tool_msg) + log.debug(f"Added tool result to conversation history: {llm_tool_name}") except Exception as e: log.error(f"Error updating conversation history: {e}") @@ -367,22 +790,22 @@ def _add_cancelled_tool_to_history( ) -> None: """Add cancelled tool call to conversation history.""" try: - # Add user cancellation - self.context.conversation_history.append( + # User cancellation message + self.context.inject_tool_message( Message( role=MessageRole.USER, content=f"Cancel {llm_tool_name} tool execution.", ) ) - # Add assistant acknowledgment arg_json = ( json.dumps(raw_arguments) if isinstance(raw_arguments, dict) else str(raw_arguments or {}) ) - self.context.conversation_history.append( + # Assistant acknowledgement with tool call + self.context.inject_tool_message( Message( role=MessageRole.ASSISTANT, content="User cancelled tool execution.", @@ -399,8 +822,8 @@ def _add_cancelled_tool_to_history( ) ) - # Add tool cancellation response - self.context.conversation_history.append( + # Tool result + self.context.inject_tool_message( Message( role=MessageRole.TOOL, name=llm_tool_name, @@ -408,24 +831,85 @@ def _add_cancelled_tool_to_history( tool_call_id=call_id, ) ) - except Exception as e: log.error(f"Error adding 
cancelled tool to history: {e}") def _should_confirm_tool(self, tool_name: str) -> bool: - """Determine if a tool should be confirmed based on preferences. - - Args: - tool_name: Name of the tool to check - - Returns: - True if tool should be confirmed, False otherwise - """ - # Use preference manager for tool confirmation decision + """Check if tool requires user confirmation.""" try: prefs = get_preference_manager() return prefs.should_confirm_tool(tool_name) except Exception as e: log.warning(f"Error checking tool confirmation preference: {e}") - # Default to confirming if there's an error return True + + def _register_discovered_tools( + self, + tool_state: Any, + discovery_tool: str, + result: Any, + ) -> None: + """Register tools found by discovery operations. + + Extracts tool names from search_tools, list_tools, or get_tool_schema results + and registers them as discovered for split budget enforcement. + + Args: + tool_state: The ToolStateManager instance + discovery_tool: Name of the discovery tool (search_tools, list_tools, get_tool_schema) + result: Raw result from the discovery tool + """ + if result is None: + return + + try: + # Extract tool names from various result formats + tool_names: list[str] = [] + + # Handle string result (might be JSON) + if isinstance(result, str): + try: + result = json.loads(result) + except json.JSONDecodeError: + return + + # Handle list of tools (from search_tools or list_tools) + if isinstance(result, list): + for item in result: + if isinstance(item, dict): + # Common keys for tool name + for key in ("name", "tool_name", "tool"): + if key in item: + tool_names.append(str(item[key])) + break + elif isinstance(item, str): + tool_names.append(item) + + # Handle dict result (from get_tool_schema or single tool) + elif isinstance(result, dict): + # Direct tool schema + if "name" in result: + tool_names.append(str(result["name"])) + # Nested tools list + elif "tools" in result and isinstance(result["tools"], list): + for 
tool in result["tools"]: + if isinstance(tool, dict) and "name" in tool: + tool_names.append(str(tool["name"])) + elif isinstance(tool, str): + tool_names.append(tool) + # Content wrapper + elif "content" in result: + # Recursively extract from content + self._register_discovered_tools( + tool_state, discovery_tool, result["content"] + ) + return + + # Register each discovered tool + for name in tool_names: + if name: + tool_state.register_discovered_tool(name) + log.debug(f"Discovered tool via {discovery_tool}: {name}") + + except Exception as e: + log.warning(f"Error registering discovered tools: {e}") diff --git a/src/mcp_cli/chat/ui_manager.py b/src/mcp_cli/chat/ui_manager.py index b3f47933..cb92e749 100644 --- a/src/mcp_cli/chat/ui_manager.py +++ b/src/mcp_cli/chat/ui_manager.py @@ -1,15 +1,15 @@ -""" -Clean, simplified Chat UI Manager using chuk-term properly. +"""Clean, async-native Chat UI Manager using unified display system. -This module provides the UI management for chat mode, handling: -- User input with prompt_toolkit -- Tool execution confirmations -- Message display using chuk-term's themed output -- Signal handling for interrupts +This module provides UI management for chat mode with: +- StreamingDisplayManager for all display operations +- Async-native throughout +- No fallback display paths +- Clean integration with chuk-term """ from __future__ import annotations +import asyncio import json import logging import signal @@ -19,20 +19,18 @@ from prompt_toolkit import PromptSession from prompt_toolkit.auto_suggest import AutoSuggestFromHistory +from prompt_toolkit.shortcuts import CompleteStyle from prompt_toolkit.history import FileHistory +from prompt_toolkit.key_binding import KeyBindings +from prompt_toolkit.keys import Keys from prompt_toolkit.styles import Style from chuk_term.ui import output from chuk_term.ui import prompts from chuk_term.ui.theme import get_theme -from mcp_cli.ui.color_converter import create_transparent_completion_style 
-from mcp_cli.chat.models import ToolExecutionState - +from mcp_cli.display import StreamingDisplayManager, create_transparent_completion_style from mcp_cli.chat.command_completer import ChatCommandCompleter - -# Use unified command system through adapter -from mcp_cli.adapters.chat import ChatCommandAdapter from mcp_cli.commands import register_all_commands from mcp_cli.utils.preferences import get_preference_manager @@ -40,14 +38,16 @@ class ChatUIManager: - """Manages the chat UI with clean chuk-term integration.""" + """Manages chat UI with unified async display system.""" def __init__(self, context) -> None: - """Initialize the UI manager with context.""" + """Initialize UI manager. + + Args: + context: Chat context containing client, history, etc. + """ self.context = context - self.verbose_mode = False # Default to compact mode for cleaner output - self.tools_running = False - self.interrupt_requested = False + self.verbose_mode = False # Tool tracking self.tool_calls: list[dict[str, Any]] = [] @@ -56,19 +56,13 @@ def __init__(self, context) -> None: self.current_tool_start_time: float | None = None # Streaming state - self.is_streaming_response = False self.streaming_handler: Any | None = None - self._pending_tool: ToolExecutionState | None = None + self.tools_running = False # Compatibility - # Centralized display manager - from mcp_cli.ui.chat_display_manager import ChatDisplayManager + # Unified display manager (async-native, chuk-term only) + self.display = StreamingDisplayManager() - self.display = ChatDisplayManager() - - # Add console attribute for compatibility with streaming handler - self.console = None # Not using Rich console, using chuk-term instead - - # Signal handling - signal.signal returns various types + # Signal handling self._prev_sigint_handler: ( Callable[[int, FrameType | None, Any], int] | signal.Handlers | None ) = None @@ -80,329 +74,345 @@ def __init__(self, context) -> None: self.last_input: str | None = None def 
_init_prompt_session(self) -> None: - """Initialize the prompt_toolkit session.""" - # Get history file from preferences + """Initialize prompt_toolkit session with history.""" pref_manager = get_preference_manager() history_path = pref_manager.get_history_file() - # Create prompt session with history and auto-suggestions - # Use theme colors for autocomplete with terminal background theme = get_theme() # Determine background color based on theme - # Light themes use white/light background, dark themes use black if theme.name in ["light"]: bg_color = "white" elif theme.name in ["minimal", "terminal"]: - bg_color = "" # No background + bg_color = "" else: - bg_color = "black" # Default for dark themes - - # Create style for autocomplete menu matching terminal background - style = Style.from_dict( - create_transparent_completion_style(theme.colors, bg_color) - ) - + bg_color = "black" + + # Create completion style + completion_style = create_transparent_completion_style(theme.colors, bg_color) + + # Create style from completion dict + merged_style = Style.from_dict(completion_style) + + # Initialize command registry + register_all_commands() + + # Create completer (uses context dict) + completer = ChatCommandCompleter(self.context.to_dict()) + + # Create key bindings for Tab completion behavior + bindings = KeyBindings() + + @bindings.add(Keys.Tab) + def handle_tab(event): + """Handle Tab key: accept suggestion, complete command, or cycle completions.""" + buff = event.app.current_buffer + + # Priority 1: If completion menu is showing, cycle through completions + if buff.complete_state: + buff.complete_next() + # If only one completion, apply it immediately + if buff.complete_state and len(buff.complete_state.completions) == 1: + buff.complete_state = None + # Priority 2: If there's an auto-suggestion (gray text from history), accept it + elif buff.suggestion: + buff.insert_text(buff.suggestion.text) + # Priority 3: Start slash command completion if typing a command 
+ elif buff.text.startswith("/"): + buff.start_completion(select_first=True) + # Priority 4: For non-command text, try to find history match + else: + # Try to trigger auto-suggest and accept if found + suggestion = buff.auto_suggest.get_suggestion(buff, buff.document) + if suggestion: + buff.insert_text(suggestion.text) + + # Create session with all features + # Note: enable_history_search conflicts with complete_while_typing + # (up arrows browse completions vs history), so we disable history search + # to allow slash command completion to work as you type self.session: PromptSession = PromptSession( history=FileHistory(str(history_path)), auto_suggest=AutoSuggestFromHistory(), - completer=ChatCommandCompleter(self.context.to_dict()), - complete_while_typing=True, - style=style, - message="> ", + enable_history_search=False, # Disabled: conflicts with complete_while_typing + completer=completer, + complete_while_typing=True, # Auto-trigger completions as you type + complete_in_thread=False, # Complete in main thread for responsiveness + style=merged_style, + complete_style=CompleteStyle.MULTI_COLUMN, # Show completions in multi-column menu + key_bindings=bindings, # Custom Tab behavior ) - # ─── User Input ─────────────────────────────────────────────────────── + logger.debug("Prompt session initialized with history and commands") - async def get_user_input(self) -> str: - """Get user input using prompt_toolkit.""" + # ==================== USER INPUT ==================== + + async def get_user_input(self, prompt: str = "You") -> str: + """Get user input with async prompt. 
+ + Args: + prompt: Prompt text to display + + Returns: + User input string + """ try: - msg = await self.session.prompt_async() - self.last_input = msg.strip() - return self.last_input or "" + # Run prompt in executor since it's blocking + loop = asyncio.get_event_loop() + user_input = await loop.run_in_executor( + None, self.session.prompt, f"\n💬 {prompt}: " + ) + + self.last_input = user_input + return str(user_input).strip() + except (KeyboardInterrupt, EOFError): - raise - except Exception as exc: - logger.error(f"Error getting user input: {exc}") - raise + return "/exit" - # ─── Message Display ───────────────────────────────────────────────── + # ==================== MESSAGE DISPLAY ==================== def print_user_message(self, message: str) -> None: - """Display user message using centralized display.""" + """Display user message. + + Args: + message: User message to display + """ self.display.show_user_message(message or "[No Message]") self.tool_calls.clear() - def print_assistant_response(self, content: str, elapsed: float) -> None: - """Display assistant response using centralized display.""" - # Stop streaming if active - if self.is_streaming_response: - self.stop_streaming_response() - - # Show any pending tool execution now (after streaming completes) - if self._pending_tool: - # Don't start tool execution here, wait for tool processor to finish - pass - - # Clean up any tool tracking - self._cleanup_tool_display() + async def print_assistant_message(self, content: str, elapsed: float = 0) -> None: + """Display assistant message. 
- # If we have pending tools, store the response for later - if self._pending_tool: - self._final_response = (content or "[No Response]", elapsed) - logger.debug("Storing final assistant response until after tool execution") + Args: + content: Assistant message content + elapsed: Elapsed time for response + """ + # If we were streaming, it's already displayed + if self.display.is_streaming: + await self.display.stop_streaming() else: - # No pending tools, show response immediately - self.display.show_assistant_message(content or "[No Response]", elapsed) - - # ─── Tool Display ──────────────────────────────────────────────────── + # Not streaming, show message directly + output.print(f"\n🤖 Assistant ({elapsed:.1f}s):") + output.print(content or "[No Response]") - def print_tool_call(self, tool_name: str, raw_args: Any) -> None: - """Display a tool call using chuk-term or integrate with streaming.""" - try: - # Start timing if first tool - if not self.tool_start_time: - self.tool_start_time = time.time() - self.tools_running = True - - # Process arguments - try: - if isinstance(raw_args, str): - processed_args = json.loads(raw_args) if raw_args.strip() else {} - else: - processed_args = raw_args or {} - except json.JSONDecodeError: - processed_args = {"raw": str(raw_args)} + # ==================== TOOL DISPLAY ==================== - # Track the tool call - self.tool_calls.append({"name": tool_name, "args": processed_args}) + async def start_tool_execution( + self, tool_name: str, arguments: dict[str, Any] + ) -> None: + """Start tool execution display. 
- # Always defer tool display until after streaming completes - logger.debug(f"Storing tool call for later display: {tool_name}") - # Store tool info for display after streaming - self._pending_tool = ToolExecutionState( - name=tool_name, arguments=processed_args, start_time=0.0 - ) - return + Args: + tool_name: Name of tool being executed + arguments: Tool arguments + """ + # Format arguments for display + processed_args = {} + for k, v in arguments.items(): + if isinstance(v, (dict, list)): + processed_args[k] = json.dumps(v) + else: + processed_args[k] = str(v) - except Exception as exc: - logger.error(f"Error displaying tool call: {exc}") - output.warning(f"Error displaying tool call: {exc}") + await self.display.start_tool_execution(tool_name, processed_args) - def _integrate_tool_call_into_streaming( - self, tool_name: str, processed_args: dict - ) -> None: - """Show tool call - during streaming, just display a simple message.""" - try: - # During streaming, don't interfere with the active display - # Just show a simple tool message - if self.is_streaming_response: - logger.debug(f"Tool call during streaming: {tool_name}") - # Let the unified display handle it naturally - if hasattr(self, "display") and hasattr( - self.display, "start_tool_execution" - ): - self.display.start_tool_execution(tool_name, processed_args) - else: - # Not streaming, show a proper tool panel - output.tool_call(tool_name, processed_args) + def print_tool_call(self, tool_name: str, raw_arguments: Any) -> None: + """Print tool call notification before execution. - except Exception as exc: - logger.warning(f"Error showing tool call: {exc}") - logger.info(f"Tool call: {tool_name} with args: {processed_args}") + Note: This is called but the output is immediately cleared by streaming display. + The actual tool parameters are shown in the tool execution status line instead. 
- def finish_tool_execution( - self, result: str | None = None, success: bool = True - ) -> None: - """Finish tool execution in centralized display.""" - # Show pending tool if we have one (after streaming completes) - if self._pending_tool: - self.display.start_tool_execution( - self._pending_tool.name, self._pending_tool.arguments - ) - # Brief pause to let animation show - import time + Args: + tool_name: Name of the tool being called + raw_arguments: Raw arguments (JSON string or dict) + """ + # Don't print here - streaming display will show it in the status line + # The display manager shows tool name + arguments during execution + pass - time.sleep(0.5) - self._pending_tool = None + def do_confirm_tool_execution(self, tool_name: str, arguments: Any) -> bool: + """Prompt user to confirm tool execution. - self.display.finish_tool_execution(result or "", success) - logger.debug(f"Finished tool execution: success={success}") + Args: + tool_name: Name of the tool + arguments: Tool arguments - # Now show the final assistant response if we have it stored - if hasattr(self, "_final_response"): - content, elapsed = self._final_response - self.display.show_assistant_message(content, elapsed) - delattr(self, "_final_response") + Returns: + True if user confirms, False otherwise + """ + from chuk_term.ui import output - def _cleanup_tool_display(self) -> None: - """Clean up tool tracking and display.""" - if self.tool_start_time: + # Parse arguments for display + if isinstance(arguments, str): try: - time.time() - self.tool_start_time - # Unified display handles its own output, no need for separate info message - pass - except Exception: - pass + args = json.loads(arguments) if arguments else {} + except json.JSONDecodeError: + args = {"raw": arguments} + else: + args = arguments or {} - # Reset tool tracking - self.tools_running = False - self.interrupt_requested = False - self.tool_calls.clear() - self.tool_times.clear() - self.tool_start_time = None - 
self.current_tool_start_time = None + # Show tool and arguments + output.warning(f"⚠️ Tool confirmation required: {tool_name}") + args_str = json.dumps(args, indent=2) if isinstance(args, dict) else str(args) + output.print(f"Parameters:\n{args_str}") - # ─── Tool Confirmation ─────────────────────────────────────────────── + # Prompt for confirmation + response = input("\nExecute this tool? [Y/n]: ").strip().lower() + return response in ("", "y", "yes") - def do_confirm_tool_execution( - self, tool_name: str | None = None, arguments: Any = None - ) -> bool: - """ - Prompt user to confirm tool execution with risk information. + async def finish_tool_execution( + self, result: str | None = None, success: bool = True + ) -> None: + """Finish tool execution display. Args: - tool_name: Name of the tool to execute - arguments: Tool arguments (for display) - - Returns: - True if user confirms, False otherwise + result: Tool execution result + success: Whether execution succeeded """ - try: - prefs = get_preference_manager() - - if tool_name: - # Get risk level for the tool - risk_level = prefs.get_tool_risk_level(tool_name) - risk_indicator = {"safe": "✓", "moderate": "⚠", "high": "⚠️"}.get( - risk_level, "?" - ) - - # Build confirmation message - message = f"{risk_indicator} Execute {tool_name} ({risk_level} risk)?" 
- output.print(message) - output.hint("y=yes, n=no, a=always allow, s=skip always") - - # Get response - response = prompts.ask("", default="y").strip().lower() - else: - # Simple confirmation - response = prompts.confirm("Execute the tool?", default=True) - response = "y" if response else "n" + await self.display.stop_tool_execution(result or "", success) - # Handle response - if response in ["y", ""]: - return True - elif response == "a" and tool_name: - # Always allow this tool - prefs.set_tool_confirmation(tool_name, "never") - output.success(f"{tool_name} will no longer require confirmation") - return True - elif response == "s" and tool_name: - # Always confirm this tool - prefs.set_tool_confirmation(tool_name, "always") - output.warning(f"{tool_name} will always require confirmation") - return False - else: - # User declined - output.info("Tool execution cancelled") - return False + # ==================== STREAMING SUPPORT ==================== - except KeyboardInterrupt: - logger.info("Tool execution cancelled by user via Ctrl-C") - output.info("Tool execution cancelled") - return False - except Exception as e: - logger.error(f"Error during tool confirmation: {e}") - return False + @property + def is_streaming_response(self) -> bool: + """Whether currently streaming a response.""" + return self.display.is_streaming - # ─── Streaming Support ─────────────────────────────────────────────── + async def start_streaming_response(self) -> None: + """Start streaming response (handled by display manager).""" + # Display manager handles this via streaming_handler + pass - def start_streaming_response(self) -> None: - """Mark that a streaming response has started.""" - self.is_streaming_response = True - logger.debug("Started streaming response") + async def stop_streaming_response(self) -> None: + """Stop streaming response.""" + if self.display.is_streaming: + await self.display.stop_streaming(interrupted=True) - def stop_streaming_response(self) -> None: - """Mark 
that streaming has stopped.""" - self.is_streaming_response = False - logger.debug("Stopped streaming response") + def stop_streaming_response_sync(self) -> None: + """Stop streaming response (sync version for cleanup).""" + # Best-effort cleanup, don't await + pass def interrupt_streaming(self) -> None: - """Interrupt streaming if active.""" - if self.is_streaming_response and self.streaming_handler: - try: - self.streaming_handler.interrupt() - logger.debug("Interrupted streaming") - except Exception as e: - logger.warning(f"Could not interrupt streaming: {e}") + """Interrupt current streaming operation.""" + if self.streaming_handler and hasattr( + self.streaming_handler, "interrupt_streaming" + ): + self.streaming_handler.interrupt_streaming() + + # ==================== SIGNAL HANDLING ==================== + + def setup_signal_handlers(self) -> None: + """Setup signal handlers for graceful interruption.""" + self._prev_sigint_handler = signal.signal( # type: ignore[assignment] + signal.SIGINT, self._handle_sigint + ) - # ─── Signal Handling ───────────────────────────────────────────────── + def restore_signal_handlers(self) -> None: + """Restore original signal handlers.""" + if self._prev_sigint_handler is not None: + signal.signal(signal.SIGINT, self._prev_sigint_handler) # type: ignore[arg-type] - def setup_interrupt_handler(self) -> None: - """Set up Ctrl-C handler for tool interruption.""" - try: + def _handle_sigint(self, signum: int, frame: FrameType | None) -> None: + """Handle SIGINT (Ctrl+C) gracefully.""" + current_time = time.time() - def _handler(signum: int, frame: FrameType | None) -> None: - current_time = time.time() + # Track interrupt count for double-tap exit + if current_time - self._last_interrupt_time > 2.0: + self._interrupt_count = 0 - # Reset counter if too much time passed - if current_time - self._last_interrupt_time > 2.0: - self._interrupt_count = 0 + self._interrupt_count += 1 + self._last_interrupt_time = current_time - 
self._last_interrupt_time = current_time - self._interrupt_count += 1 + if self._interrupt_count >= 2: + # Double tap - force exit + output.warning("\n\nForce exit requested") + raise KeyboardInterrupt - # Handle streaming interruption - if self.is_streaming_response: - output.warning("Interrupting streaming response...") - self.interrupt_streaming() - return + # Single tap - try graceful interrupt + if self.display.is_streaming: + self.interrupt_streaming() + output.warning( + "\n\n⚠️ Interrupting streaming... (Ctrl+C again to force exit)" + ) + else: + output.warning("\n\n⚠️ Interrupted (Ctrl+C again to exit)") - # Handle tool interruption - if self.tools_running and not self.interrupt_requested: - self.interrupt_requested = True - output.warning("Interrupt requested - cancelling tool execution...") - self._interrupt_now() - elif self.tools_running and self._interrupt_count >= 2: - output.error("Force terminating operation...") - self.stop_tool_calls() + # ==================== TOOL CONFIRMATIONS ==================== - # Save and set handler - self._prev_sigint_handler = signal.signal(signal.SIGINT, _handler) # type: ignore[assignment] + async def confirm_tool_execution( + self, tool_name: str, arguments: dict[str, Any] + ) -> bool: + """Prompt user to confirm tool execution. 
- except Exception as exc: - logger.warning(f"Could not set up interrupt handler: {exc}") + Args: + tool_name: Name of tool to execute + arguments: Tool arguments - def _restore_sigint_handler(self) -> None: - """Restore the previous signal handler.""" - if self._prev_sigint_handler: - try: - signal.signal(signal.SIGINT, self._prev_sigint_handler) # type: ignore[arg-type] - self._prev_sigint_handler = None - except Exception as exc: - logger.warning(f"Could not restore signal handler: {exc}") + Returns: + True if confirmed, False otherwise + """ + # Format arguments for display + args_display = json.dumps(arguments, indent=2) + + # Show tool info + output.info(f"\n🔧 Tool: {tool_name}") + output.print(f"Arguments:\n{args_display}\n") + + # Get confirmation + loop = asyncio.get_event_loop() + result = await loop.run_in_executor(None, prompts.confirm, "Execute this tool?") + + return bool(result) + + # ==================== STATUS & INFO ==================== + + def show_status(self) -> None: + """Show current chat status.""" + status = self.context.get_status_summary() + + output.info("📊 Chat Status:") + output.print(f" Provider: {status.provider}") + output.print(f" Model: {status.model}") + output.print(f" Messages: {status.message_count}") + output.print(f" Tools: {status.tool_count}") + output.print(f" Servers: {status.server_count}") + output.print(f" Tool Executions: {status.tool_execution_count}") + + def show_help(self) -> None: + """Show help message.""" + output.info("💬 Chat Commands:") + output.print(" /help - Show this help") + output.print(" /status - Show status") + output.print(" /clear - Clear conversation") + output.print(" /history - Show conversation history") + output.print(" /exit - Exit chat") + output.print("\n💡 Tip: Ctrl+C to interrupt streaming") + + def cleanup(self) -> None: + """Cleanup UI manager resources.""" + self.restore_signal_handlers() + logger.debug("UI manager cleaned up") + + # ==================== COMPATIBILITY METHODS 
==================== def _interrupt_now(self) -> None: - """Interrupt running tools immediately.""" - if hasattr(self.context, "tool_processor"): - self.context.tool_processor.cancel_running_tasks() + """Immediate interrupt (compatibility method).""" + self.interrupt_streaming() def stop_tool_calls(self) -> None: - """Stop all tool calls and clean up.""" + """Stop tool calls (compatibility method).""" self.tools_running = False - self.tool_calls.clear() - self.tool_times.clear() - self.tool_start_time = None - self.current_tool_start_time = None - # Compatibility alias - finish_tool_calls = stop_tool_calls + async def handle_command(self, user_input: str) -> bool: + """Handle slash command. - # ─── Command Handling ──────────────────────────────────────────────── + Args: + user_input: User input string - async def handle_command(self, cmd: str) -> bool: - """Process a slash command.""" + Returns: + True if handled as command, False otherwise + """ try: # Ensure commands are registered register_all_commands() @@ -417,7 +427,9 @@ async def handle_command(self, cmd: str) -> bool: } # Use the unified command adapter - handled = await ChatCommandAdapter.handle_command(cmd, context) + from mcp_cli.adapters.chat import ChatCommandAdapter + + handled = await ChatCommandAdapter.handle_command(user_input, context) # Check if context requested exit if self.context.exit_requested: @@ -426,16 +438,6 @@ async def handle_command(self, cmd: str) -> bool: return handled except Exception as exc: - logger.error(f"Error handling command '{cmd}': {exc}") - output.error(f"Error executing command: {exc}") - return True - - # ─── Cleanup ───────────────────────────────────────────────────────── - - def cleanup(self) -> None: - """Clean up resources.""" - try: - self._cleanup_tool_display() - self._restore_sigint_handler() - except Exception as exc: - logger.warning(f"Error during cleanup: {exc}") + logger.exception("Error handling command") + output.error(f"Error handling command: {exc}") 
+ return False diff --git a/src/mcp_cli/cli/__init__.py b/src/mcp_cli/cli/__init__.py deleted file mode 100644 index 1e46588f..00000000 --- a/src/mcp_cli/cli/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# mcp_cli/cli/__init__.py -""" -mcp_cli.cli - -Holds the CLI-facing registry and subcommands under cli/commands. -""" diff --git a/src/mcp_cli/cli/commands/__init__.py b/src/mcp_cli/cli/commands/__init__.py deleted file mode 100644 index 7789b65b..00000000 --- a/src/mcp_cli/cli/commands/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# mcp_cli/cli/commands/__init__.py -"""CLI commands package.""" diff --git a/src/mcp_cli/commands/__init__.py b/src/mcp_cli/commands/__init__.py index b937d542..a8238895 100644 --- a/src/mcp_cli/commands/__init__.py +++ b/src/mcp_cli/commands/__init__.py @@ -69,27 +69,39 @@ def register_all_commands() -> None: This should be called once during application startup. """ - # Import all command implementations - from mcp_cli.commands.definitions.servers import ServersCommand - from mcp_cli.commands.definitions.server_singular import ServerSingularCommand - from mcp_cli.commands.definitions.help import HelpCommand - from mcp_cli.commands.definitions.exit import ExitCommand - from mcp_cli.commands.definitions.clear import ClearCommand - from mcp_cli.commands.definitions.tools import ToolsCommand - from mcp_cli.commands.definitions.providers import ProviderCommand - from mcp_cli.commands.definitions.provider_singular import ProviderSingularCommand - from mcp_cli.commands.definitions.models import ModelCommand - from mcp_cli.commands.definitions.ping import PingCommand - from mcp_cli.commands.definitions.theme_singular import ThemeSingularCommand - from mcp_cli.commands.definitions.themes_plural import ThemesPluralCommand - from mcp_cli.commands.definitions.resources import ResourcesCommand - from mcp_cli.commands.definitions.prompts import PromptsCommand - from mcp_cli.commands.definitions.conversation import ConversationCommand - from 
mcp_cli.commands.definitions.verbose import VerboseCommand - from mcp_cli.commands.definitions.interrupt import InterruptCommand - from mcp_cli.commands.definitions.tool_history import ToolHistoryCommand - from mcp_cli.commands.definitions.execute_tool import ExecuteToolCommand - from mcp_cli.commands.definitions.token import TokenCommand + # Import all command implementations from grouped modules + from mcp_cli.commands.core import ( + HelpCommand, + ExitCommand, + ClearCommand, + VerboseCommand, + InterruptCommand, + ) + from mcp_cli.commands.tools import ( + ToolsCommand, + ExecuteToolCommand, + ToolHistoryCommand, + ) + from mcp_cli.commands.servers import ( + ServersCommand, + ServerSingularCommand, + PingCommand, + ) + from mcp_cli.commands.providers import ( + ProviderCommand, + ProviderSingularCommand, + ModelCommand, + ) + from mcp_cli.commands.resources import ( + ResourcesCommand, + PromptsCommand, + ) + from mcp_cli.commands.tokens import TokenCommand + from mcp_cli.commands.theme import ( + ThemeSingularCommand, + ThemesPluralCommand, + ) + from mcp_cli.commands.conversation import ConversationCommand # Register basic commands registry.register(HelpCommand()) diff --git a/src/mcp_cli/commands/actions/__init__.py b/src/mcp_cli/commands/actions/__init__.py deleted file mode 100644 index 1f31a90b..00000000 --- a/src/mcp_cli/commands/actions/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# src/mcp_cli/commands/actions/__init__.py -""" -Command actions for MCP CLI. - -This module contains the business logic implementations -for all commands - the actual work that gets done. 
-""" - -# Import action functions -from .servers import servers_action_async -from .models import model_action_async -from .providers import provider_action_async -from .resources import resources_action_async -from .prompts import prompts_action_async -from .tools import tools_action_async - -__all__ = [ - "servers_action_async", - "model_action_async", - "provider_action_async", - "resources_action_async", - "prompts_action_async", - "tools_action_async", -] diff --git a/src/mcp_cli/commands/actions/clear.py b/src/mcp_cli/commands/actions/clear.py deleted file mode 100644 index ecbfa30f..00000000 --- a/src/mcp_cli/commands/actions/clear.py +++ /dev/null @@ -1,29 +0,0 @@ -# src/mcp_cli/commands/actions/clear.py -""" -Clear action for MCP CLI. - -Provides functionality to clear the terminal screen. - -Public functions: -* **clear_action()** - Clear the terminal screen with optional verbose output. -""" - -from __future__ import annotations - -from chuk_term.ui import output, clear_screen - - -def clear_action(*, verbose: bool = False) -> None: - """ - Clear the terminal screen. - - Args: - verbose: If True, print a confirmation message after clearing. 
- """ - clear_screen() - - if verbose: - output.hint("Screen cleared.") - - -__all__ = ["clear_action"] diff --git a/src/mcp_cli/commands/actions/cmd.py b/src/mcp_cli/commands/actions/cmd.py deleted file mode 100644 index 931b8496..00000000 --- a/src/mcp_cli/commands/actions/cmd.py +++ /dev/null @@ -1,449 +0,0 @@ -"""Command mode actions for Unix-friendly automation.""" - -from __future__ import annotations - -import json -import sys -from pathlib import Path -from typing import Any - -from chuk_term.ui import output -from mcp_cli.commands.models.cmd import MessageRole, Message - - -async def cmd_action_async( - input_file: str | None = None, - output_file: str | None = None, - prompt: str | None = None, - tool: str | None = None, - tool_args: str | None = None, - system_prompt: str | None = None, - raw: bool = False, - single_turn: bool = False, - max_turns: int = 30, -) -> None: - """ - Execute command mode operations for automation and scripting. - - Args: - input_file: Input file path (use "-" for stdin) - output_file: Output file path (use "-" for stdout) - prompt: Prompt text to use - tool: Tool name to execute - tool_args: Tool arguments as JSON string - system_prompt: Custom system prompt - raw: Output raw response without formatting - single_turn: Disable multi-turn conversation - max_turns: Maximum conversation turns - """ - from mcp_cli.context import get_context - - try: - # Get the initialized context - context = get_context() - if not context or not context.tool_manager: - output.error( - "Context not initialized. This command requires a tool manager." 
- ) - return - - # Handle tool execution mode - if tool: - await _execute_tool_direct( - tool_name=tool, - tool_args_json=tool_args, - output_file=output_file, - raw=raw, - ) - return - - # Handle prompt mode with LLM - if prompt or input_file: - await _execute_prompt_mode( - input_file=input_file, - output_file=output_file, - prompt=prompt, - system_prompt=system_prompt, - raw=raw, - single_turn=single_turn, - max_turns=max_turns, - ) - return - - # No mode specified - output.error("No operation specified. Use --tool or --prompt/--input") - output.hint("Examples:") - output.info(" mcp-cli cmd --tool list_tables") - output.info( - ' mcp-cli cmd --tool read_query --tool-args \'{"query": "SELECT * FROM users"}\'' - ) - output.info(" echo 'Analyze this' | mcp-cli cmd --input - --output result.txt") - output.info(" mcp-cli cmd --prompt 'Summarize the data' --input data.txt") - - except Exception as e: - output.error(f"Command execution failed: {e}") - raise - - -async def _execute_tool_direct( - tool_name: str, - tool_args_json: str | None, - output_file: str | None, - raw: bool, -) -> None: - """Execute a tool directly without LLM interaction.""" - from mcp_cli.context import get_context - - context = get_context() - tool_manager = context.tool_manager - - if not tool_manager: - output.error("Tool manager not initialized") - return - - # Parse tool arguments - tool_args = {} - if tool_args_json: - try: - tool_args = json.loads(tool_args_json) - except json.JSONDecodeError as e: - output.error(f"Invalid JSON in tool arguments: {e}") - return - - # Execute the tool - try: - if not raw: - output.info(f"Executing tool: {tool_name}") - - tool_call_result = await tool_manager.execute_tool(tool_name, tool_args) - - # Check for errors - if not tool_call_result.success or tool_call_result.error: - output.error(f"Tool execution failed: {tool_call_result.error}") - return - - # Extract the actual result - result_data = tool_call_result.result - - # Format output - if raw: - 
result_str = ( - json.dumps(result_data) - if not isinstance(result_data, str) - else result_data - ) - else: - result_str = ( - json.dumps(result_data, indent=2) - if not isinstance(result_data, str) - else result_data - ) - - # Write output - if output_file and output_file != "-": - Path(output_file).write_text(result_str) - if not raw: - output.success(f"Output written to: {output_file}") - else: - # Write to stdout - print(result_str) - - except Exception as e: - output.error(f"Tool execution failed: {e}") - raise - - -async def _execute_prompt_mode( - input_file: str | None, - output_file: str | None, - prompt: str | None, - system_prompt: str | None, - raw: bool, - single_turn: bool, - max_turns: int, -) -> None: - """Execute prompt mode with LLM interaction.""" - from mcp_cli.context import get_context - - context = get_context() - - # Read input - input_text = "" - if input_file: - if input_file == "-": - # Read from stdin - input_text = sys.stdin.read() - else: - input_text = Path(input_file).read_text() - - # Build the full prompt - if prompt and input_text: - full_prompt = f"{prompt}\n\nInput:\n{input_text}" - elif prompt: - full_prompt = prompt - elif input_text: - full_prompt = input_text - else: - output.error("No prompt or input provided") - return - - # Get the LLM client - use the model_manager from context - try: - # Use the model manager from context which has the correct provider/model - model_manager = context.model_manager - if not model_manager: - # Fallback: create new one if context doesn't have it - from mcp_cli.model_management import ModelManager - - model_manager = ModelManager() - # Set it to the correct provider/model from context - model_manager.switch_model(context.provider, context.model) - - client = model_manager.get_client( - provider=context.provider, model=context.model - ) - - if not client: - output.error( - f"Failed to get LLM client for {context.provider}/{context.model}" - ) - return - except Exception as e: - 
output.error(f"Failed to initialize LLM client: {e}") - return - - # Build messages using Pydantic models - messages_models: list[Message] = [] - if system_prompt: - messages_models.append(Message(role=MessageRole.SYSTEM, content=system_prompt)) - messages_models.append(Message(role=MessageRole.USER, content=full_prompt)) - - # Convert to dict only for external LLM API - messages = [msg.model_dump(exclude_none=True) for msg in messages_models] - - # Execute the conversation - try: - if not raw: - output.info(f"Processing with {context.provider}/{context.model}...") - - # Get available tools - tools = None - if context.tool_manager and not single_turn: - tools = await context.tool_manager.get_tools_for_llm() - - # Make the LLM call using chuk-llm interface - response = await client.create_completion( - model=context.model, - messages=messages, - tools=tools, - max_tokens=4096, - ) - - # Extract the response - chuk-llm returns a dict - result_text = response.get("response", "") - tool_calls = response.get("tool_calls", []) - - # Handle tool calls if present - if tool_calls and not single_turn: - # Execute tools and continue conversation - result_text = await _handle_tool_calls( - client=client, - messages=messages, - tool_calls=tool_calls, - response_text=result_text, - max_turns=max_turns, - raw=raw, - ) - - # Write output - if output_file and output_file != "-": - Path(output_file).write_text(result_text) - if not raw: - output.success(f"Output written to: {output_file}") - else: - # Write to stdout - print(result_text) - - except Exception as e: - output.error(f"LLM execution failed: {e}") - raise - - -async def _handle_tool_calls( - client, - messages: list[dict[str, Any]], - tool_calls: list, - response_text: str, - max_turns: int, - raw: bool, -) -> str: - """Handle tool calls in multi-turn conversation. - - Note: messages list is dict format for LLM API compatibility. - We maintain it as dicts since it's passed directly to external LLM client. 
- """ - from mcp_cli.context import get_context - - context = get_context() - tool_manager = context.tool_manager - - if not tool_manager: - output.error("Tool manager not initialized") - return response_text - - # Add assistant message with tool calls (dict format for LLM API) - messages.append( - { - "role": MessageRole.ASSISTANT.value, - "content": response_text, - "tool_calls": tool_calls, - } - ) - - # Execute each tool call - for tool_call in tool_calls: - # Handle dict format from chuk-llm - if isinstance(tool_call, dict): - tool_name = tool_call.get("function", {}).get("name", "") - tool_args_str = tool_call.get("function", {}).get("arguments", "{}") - tool_call_id = tool_call.get("id", "") - else: - # Handle object format - tool_name = tool_call.function.name - tool_args_str = tool_call.function.arguments - tool_call_id = tool_call.id - - # Parse arguments - if isinstance(tool_args_str, str): - tool_args = json.loads(tool_args_str) - else: - tool_args = tool_args_str - - if not raw: - output.info(f"Executing tool: {tool_name}") - - try: - tool_call_result = await tool_manager.execute_tool(tool_name, tool_args) - # Extract result data and format as string - result_data = ( - tool_call_result.result - if tool_call_result.success - else f"Error: {tool_call_result.error}" - ) - result_str = ( - json.dumps(result_data) - if not isinstance(result_data, str) - else result_data - ) - - # Add tool result to messages - messages.append( - { - "role": MessageRole.TOOL.value, - "tool_call_id": tool_call_id, - "name": tool_name, - "content": result_str, - } - ) - except Exception as e: - error_msg = f"Tool execution failed: {e}" - output.error(error_msg) - messages.append( - { - "role": MessageRole.TOOL.value, - "tool_call_id": tool_call_id, - "name": tool_name, - "content": f"Error: {error_msg}", - } - ) - - # Continue conversation - turns = 1 - while turns < max_turns: - tools = await tool_manager.get_tools_for_llm() if tool_manager else None - response = await 
client.create_completion( - model=context.model, - messages=messages, - tools=tools, - max_tokens=4096, - ) - - # Extract response from dict - response_text = response.get("response", "") - response_tool_calls = response.get("tool_calls", []) - - # If no more tool calls, we're done - if not response_tool_calls: - return response_text - - # Add assistant message and execute tools - messages.append( - { - "role": MessageRole.ASSISTANT.value, - "content": response_text, - "tool_calls": response_tool_calls, - } - ) - - # Execute tool calls - for tool_call in response_tool_calls: - # Handle dict format - if isinstance(tool_call, dict): - tool_name = tool_call.get("function", {}).get("name", "") - tool_args_str = tool_call.get("function", {}).get("arguments", "{}") - tool_call_id = tool_call.get("id", "") - else: - tool_name = tool_call.function.name - tool_args_str = tool_call.function.arguments - tool_call_id = tool_call.id - - # Parse arguments - if isinstance(tool_args_str, str): - tool_args = json.loads(tool_args_str) - else: - tool_args = tool_args_str - - if not raw: - output.info(f"Executing tool: {tool_name}") - - try: - tool_call_result = await tool_manager.execute_tool(tool_name, tool_args) - # Extract result data and format as string - result_data = ( - tool_call_result.result - if tool_call_result.success - else f"Error: {tool_call_result.error}" - ) - result_str = ( - json.dumps(result_data) - if not isinstance(result_data, str) - else result_data - ) - - messages.append( - { - "role": MessageRole.TOOL.value, - "tool_call_id": tool_call_id, - "name": tool_name, - "content": result_str, - } - ) - except Exception as e: - error_msg = f"Tool execution failed: {e}" - output.error(error_msg) - messages.append( - { - "role": MessageRole.TOOL.value, - "tool_call_id": tool_call_id, - "name": tool_name, - "content": f"Error: {error_msg}", - } - ) - - turns += 1 - - # Max turns reached - if not raw: - output.warning(f"Max turns ({max_turns}) reached") - - return 
response_text diff --git a/src/mcp_cli/commands/actions/exit.py b/src/mcp_cli/commands/actions/exit.py deleted file mode 100644 index 50694c14..00000000 --- a/src/mcp_cli/commands/actions/exit.py +++ /dev/null @@ -1,38 +0,0 @@ -# src/mcp_cli/commands/actions/exit.py -""" -Exit action for MCP CLI. - -Provides functionality to cleanly terminate the MCP CLI session. - -Public functions: -* **exit_action()** - Exit the application with cleanup. -""" - -from __future__ import annotations - -import sys -from chuk_term.ui import output, restore_terminal - - -def exit_action(interactive: bool = True) -> bool: - """ - Cleanly exit the MCP CLI session. - - Args: - interactive: If True, return to allow outer loop to break. - If False, call sys.exit(0) to terminate process. - - Returns: - True when interactive mode (to signal loop break). - Never returns when non-interactive (process exits). - """ - output.info("Exiting… Goodbye!") - restore_terminal() - - if not interactive: - sys.exit(0) - - return True - - -__all__ = ["exit_action"] diff --git a/src/mcp_cli/commands/actions/help.py b/src/mcp_cli/commands/actions/help.py deleted file mode 100644 index 6bdcd7d6..00000000 --- a/src/mcp_cli/commands/actions/help.py +++ /dev/null @@ -1,124 +0,0 @@ -# src/mcp_cli/commands/actions/help.py -""" -Help command for MCP CLI. - -Displays help information for commands in both chat and CLI modes. -""" - -from __future__ import annotations -from typing import Any - -from chuk_term.ui import output, format_table -from mcp_cli.commands.registry import registry - - -def help_action(command_name: str | None = None, console: Any = None) -> None: - """ - Display help for a specific command or all commands. - - Args: - command_name: Name of command to get help for. If None, shows all commands. 
- console: Rich console object (optional, for compatibility with interactive mode) - """ - # Note: console argument is accepted for backward compatibility but not used - # The new implementation uses the UI output module instead - - commands = _get_commands() - - if command_name: - _show_command_help(command_name, commands) - else: - _show_all_commands(commands) - - -def _get_commands() -> dict[str, object]: - """Get available commands from the unified registry.""" - commands: dict[str, object] = {} - for cmd in registry.list_commands(): - commands[cmd.name] = cmd - return commands - - -def _show_command_help(command_name: str, commands: dict[str, object]) -> None: - """Show detailed help for a specific command.""" - cmd = commands.get(command_name) - - if cmd is None: - output.error(f"Unknown command: {command_name}") - return - - # Get help text - help_text = getattr(cmd, "help", "No description provided.") - - # Display command details - cmd_name = getattr(cmd, "name", command_name) - - output.panel(f"## {cmd_name}\n\n{help_text}", title="Command Help", style="cyan") - - # Show aliases if available - aliases = getattr(cmd, "aliases", []) - if aliases: - output.print(f"\n[dim]Aliases: {', '.join(aliases)}[/dim]") - - -def _show_all_commands(commands: dict[str, object]) -> None: - """Show a summary table of all available commands.""" - if not commands: - output.warning("No commands available") - return - - # Build table data - table_data = [] - for name, cmd in sorted(commands.items()): - # Get help text - help_text = getattr(cmd, "help", "") - - # Extract first meaningful line from help text - desc = _extract_description(help_text) - - # Get aliases - aliases = "-" - cmd_aliases = getattr(cmd, "aliases", []) - if cmd_aliases: - aliases = ", ".join(cmd_aliases) - - table_data.append({"Command": name, "Aliases": aliases, "Description": desc}) - - # Display table - table = format_table( - table_data, - title="Available Commands", - columns=["Command", "Aliases", 
"Description"], - ) - output.print_table(table) - - output.hint( - "\nType 'help ' for detailed information on a specific command." - ) - - # Add provider management tips - output.print() - output.tip("💡 Provider Management:") - output.info(" • List all: /providers") - output.info(" • Add custom: /provider add [model]") - output.info(" • Switch: /provider ") - output.info(" • Remove: /provider remove ") - output.print() - output.hint("Custom providers need API keys as environment variables:") - output.info(" Pattern: {PROVIDER_NAME}_API_KEY") - output.info(" Example: 'localai' → export LOCALAI_API_KEY=your-key") - output.info(" Example: 'my-llm' → export MY_LLM_API_KEY=your-key") - - -def _extract_description(help_text: str | None) -> str: - """Extract a one-line description from help text.""" - if not help_text: - return "No description" - - # Find first non-empty line that doesn't start with "usage" - for line in help_text.splitlines(): - line = line.strip() - if line and not line.lower().startswith("usage"): - return line - - return "No description" diff --git a/src/mcp_cli/commands/actions/models.py b/src/mcp_cli/commands/actions/models.py deleted file mode 100644 index bef32e1b..00000000 --- a/src/mcp_cli/commands/actions/models.py +++ /dev/null @@ -1,279 +0,0 @@ -# src/mcp_cli/commands/actions/models.py -""" -Model management command for MCP-CLI. 
- -Commands: - /model → show current model & provider - /model list → list all available models - /model → switch to a different model - /model refresh → refresh model discovery -""" - -from __future__ import annotations - - -from chuk_term.ui import output, format_table -from mcp_cli.model_management import ModelManager -from mcp_cli.utils.async_utils import run_blocking -from mcp_cli.utils.llm_probe import LLMProbe -from mcp_cli.context import get_context, ApplicationContext -from mcp_cli.commands.models import ModelActionParams - - -async def model_action_async(params: ModelActionParams) -> None: - """ - Handle model management commands. - - Args: - params: Model action parameters - - Example: - >>> params = ModelActionParams(args=["list"], detailed=True) - >>> await model_action_async(params) - """ - # Get context and model manager - context = get_context() - model_manager = context.model_manager - - if not model_manager: - output.error("Model manager not available") - return - - provider = model_manager.get_active_provider() - current_model = model_manager.get_active_model() - - # No arguments - show current status - if not params.args: - await _show_status(model_manager, current_model, provider) - return - - command = params.args[0].lower() - - # Handle subcommands - if command == "list": - await _list_models(model_manager, provider, current_model) - elif command == "refresh": - await _refresh_models(model_manager, provider) - else: - # Assume it's a model name to switch to - await _switch_model( - params.args[0], model_manager, provider, current_model, context - ) - - -async def _show_status(model_manager: ModelManager, model: str, provider: str) -> None: - """Show current model status with visual appeal.""" - output.rule("[bold]🤖 Model Status[/bold]", style="primary") - output.print() - - # Show current status with formatting - output.print(f" [bold]Provider:[/bold] {provider}") - output.print(f" [bold]Model:[/bold] {model}") - - # Get available models - 
available_models = model_manager.get_available_models(provider) - - if not available_models: - output.print() - output.warning(" ⚠️ No models found for current provider") - return - - # Show first few available models with visual hierarchy - output.print() - output.print(" [bold]Available models:[/bold]") - count = 0 - for available_model in available_models: - if available_model == model: - output.success(f" ✓ {available_model} [dim](current)[/dim]") - else: - output.print(f" • {available_model}") - - count += 1 - if count >= 10: - remaining = len(available_models) - 10 - if remaining > 0: - output.print(f" [dim]... and {remaining} more[/dim]") - break - - # Show Ollama status if applicable - if provider.lower() == "ollama": - await _show_ollama_status(model_manager) - - output.print() - output.tip( - "💡 Use: /model to switch | /models to list all | /model refresh to discover" - ) - - -async def _list_models( - model_manager: ModelManager, provider: str, current_model: str -) -> None: - """List all available models.""" - available_models = model_manager.get_available_models(provider) - - if not available_models: - output.error(f"No models found for provider '{provider}'") - return - - # Build table data - table_data = [] - - # Get local Ollama models if applicable - local_models: list[str] = [] - if provider.lower() == "ollama": - ollama_running, local_models = await _check_local_ollama() - - # Get static models from config - static_models = set() - try: - # Just get the available models - we don't differentiate static anymore - static_models = set(available_models) - except Exception: - pass - - # Build rows - for model_name in available_models: - # Determine status and type - if model_name == current_model: - status = "→ Current" - else: - status = "" - - if model_name in static_models: - model_type = "Static" - elif model_name in local_models: - model_type = "Local" - else: - model_type = "Discovered" - - # Add info - info = [] - if ":latest" in model_name: - 
info.append("latest") - if "embed" in model_name.lower(): - info.append("embedding") - - table_data.append( - { - "Status": status, - "Model": model_name, - "Type": model_type, - "Info": ", ".join(info) if info else "-", - } - ) - - # Display table - table = format_table( - table_data, - title=f"Models for {provider} ({len(available_models)} total)", - columns=["Status", "Model", "Type", "Info"], - ) - output.print_table(table) - - output.tip("Use '/model ' to switch to any model") - - -async def _refresh_models(model_manager: ModelManager, provider: str) -> None: - """Refresh model discovery.""" - with output.loading(f"Refreshing models for {provider}..."): - try: - # refresh_models returns count of new models discovered - new_count = model_manager.refresh_models(provider) - after_count = len(model_manager.get_available_models(provider)) - - if new_count > 0: - output.success(f"Discovered {new_count} new models!") - else: - output.info("No new models discovered") - - output.print(f"Total models: {after_count}") - - except Exception as e: - output.error(f"Refresh error: {e}") - - -async def _switch_model( - new_model: str, - model_manager: ModelManager, - provider: str, - current_model: str, - context: ApplicationContext, -) -> None: - """Attempt to switch to a new model.""" - with output.loading(f"Testing model '{new_model}'..."): - try: - # Validate model (note: validate_model takes model, provider order) - is_valid = model_manager.validate_model(new_model, provider) - - if not is_valid: - output.error(f"Model not available: {new_model}") - - # Show suggestions - available = model_manager.get_available_models(provider) - if available: - suggestions = available[:5] - output.tip(f"Available models: {', '.join(suggestions)}") - if len(available) > 5: - output.print(f"... 
and {len(available) - 5} more") - return - - # Test the model - async with LLMProbe(model_manager, suppress_logging=True) as probe: - result = await probe.test_model(new_model) - - if result.success: - # Switch successful - model_manager.switch_model(provider, new_model) - # Update the ApplicationContext attributes directly - context.model = new_model - # context doesn't have a client attribute - context.model_manager = model_manager - output.success(f"Switched to model: {new_model}") - else: - error_msg = result.error_message or "Model test failed" - output.error(f"Model test failed: {error_msg}") - output.warning(f"Keeping current model: {current_model}") - - except Exception as e: - output.error(f"Model switch failed: {e}") - output.warning(f"Keeping current model: {current_model}") - - -async def _show_ollama_status(model_manager: ModelManager) -> None: - """Show Ollama-specific status information.""" - try: - ollama_running, local_models = await _check_local_ollama() - - if ollama_running: - available = len(model_manager.get_available_models("ollama")) - # Discovery is always enabled in the new architecture - status = f"Ollama: {len(local_models)} local, {available} accessible | Discovery: ✅" - output.info(f"\n{status}") - else: - output.hint("\nOllama: Not running | Use 'ollama serve' to start") - - except Exception: - pass - - -async def _check_local_ollama() -> tuple[bool, list[str]]: - """Check if Ollama is running and get local models.""" - try: - import httpx - - async with httpx.AsyncClient(timeout=3.0) as client: - response = await client.get("http://localhost:11434/api/tags") - response.raise_for_status() - data = response.json() - - models = [m["name"] for m in data.get("models", [])] - return True, models - - except Exception: - return False, [] - - -def model_action(args: list[str]) -> None: - """Synchronous wrapper for model_action_async.""" - params = ModelActionParams(args=args) - run_blocking(model_action_async(params)) diff --git 
a/src/mcp_cli/commands/actions/ping.py b/src/mcp_cli/commands/actions/ping.py deleted file mode 100644 index b2efc3a2..00000000 --- a/src/mcp_cli/commands/actions/ping.py +++ /dev/null @@ -1,210 +0,0 @@ -# src/mcp_cli/commands/actions/ping.py -""" -Ping MCP servers to check connectivity and measure latency. -""" - -from __future__ import annotations - -import asyncio -import logging -import time -from typing import Any, Sequence - -from chuk_mcp.protocol.messages import send_ping - -from chuk_term.ui import output, format_table -from mcp_cli.tools.manager import ToolManager -from mcp_cli.utils.async_utils import run_blocking - -logger = logging.getLogger(__name__) - - -async def ping_action_async( - tm: ToolManager, - server_names: dict[int, str] | None = None, - targets: Sequence[str] = (), -) -> bool: - """ - Ping all or specified MCP servers. - - Args: - tm: ToolManager instance - server_names: Optional mapping of server indices to names - targets: Specific servers to ping (empty = all) - - Returns: - True if at least one server was pinged - """ - streams = list(tm.get_streams()) - - # Get server info - server_infos = await tm.get_server_info() - - # Build ping tasks - tasks = [] - for idx, (read_stream, write_stream) in enumerate(streams): - name = _get_server_name(idx, server_names, server_infos) - - # Apply target filter if specified - if targets and not _matches_target(idx, name, targets): - continue - - task = asyncio.create_task( - _ping_server(idx, name, read_stream, write_stream), name=name - ) - tasks.append(task) - - # Check if we have servers to ping - if not tasks: - output.error("No matching servers found") - output.hint("Use 'servers' command to list available servers") - return False - - # Execute pings - with output.loading(f"Pinging {len(tasks)} server(s)..."): - results = await asyncio.gather(*tasks) - - # Display results - _display_results(results) - return True - - -async def _ping_server( - idx: int, - name: str, - read_stream: Any, - 
write_stream: Any, - timeout: float = 5.0, -) -> tuple[str, bool, float]: - """ - Ping a single server and measure latency. - - Args: - idx: Server index - name: Server name - read_stream: Read stream for server - write_stream: Write stream for server - timeout: Ping timeout in seconds - - Returns: - Tuple of (name, success, latency_ms) - """ - start = time.perf_counter() - - try: - success = await asyncio.wait_for(send_ping(read_stream, write_stream), timeout) - except asyncio.TimeoutError: - logger.debug(f"Ping timeout for server {name}") - success = False - except Exception as e: - logger.debug(f"Ping failed for server {name}: {e}") - success = False - - latency_ms = (time.perf_counter() - start) * 1000 - return name, success, latency_ms - - -def _get_server_name( - idx: int, - explicit_names: dict[int, str] | None, - server_infos: list, -) -> str: - """ - Get the display name for a server. - - Priority: - 1. Explicit name from server_names dict - 2. Name from server info - 3. Generic "server-{idx}" - """ - if explicit_names and idx in explicit_names: - return explicit_names[idx] - - if idx < len(server_infos): - name: str = server_infos[idx].name - return name - - return f"server-{idx}" - - -def _matches_target(idx: int, name: str, targets: Sequence[str]) -> bool: - """ - Check if a server matches any of the target filters. - - Args: - idx: Server index - name: Server name - targets: Target filters - - Returns: - True if server matches any target - """ - for target in targets: - target_lower = target.lower() - if target_lower in (str(idx), name.lower()): - return True - return False - - -def _display_results(results: list[tuple[str, bool, float]]) -> None: - """ - Display ping results in a formatted table. 
- - Args: - results: List of (name, success, latency_ms) tuples - """ - # Sort results by server name - sorted_results = sorted(results, key=lambda x: x[0].lower()) - - # Build table data - table_data = [] - successful_count = 0 - total_latency = 0.0 - - for name, success, latency_ms in sorted_results: - if success: - status = "✓ Online" - latency = f"{latency_ms:.1f} ms" - successful_count += 1 - total_latency += latency_ms - else: - status = "✗ Offline" - latency = "-" - - table_data.append({"Server": name, "Status": status, "Latency": latency}) - - # Display table - table = format_table( - table_data, title="Server Ping Results", columns=["Server", "Status", "Latency"] - ) - output.print_table(table) - - # Display summary - output.print() - if successful_count > 0: - avg_latency = total_latency / successful_count - output.success(f"{successful_count}/{len(results)} servers online") - output.info(f"Average latency: {avg_latency:.1f} ms") - else: - output.error("All servers are offline") - - -def ping_action( - tm: ToolManager, - server_names: dict[int, str] | None = None, - targets: Sequence[str] = (), -) -> bool: - """ - Synchronous wrapper for ping_action_async. - - Args: - tm: ToolManager instance - server_names: Optional mapping of server indices to names - targets: Specific servers to ping - - Returns: - True if at least one server was pinged - """ - return run_blocking( - ping_action_async(tm, server_names=server_names, targets=targets) - ) diff --git a/src/mcp_cli/commands/actions/prompts.py b/src/mcp_cli/commands/actions/prompts.py deleted file mode 100644 index 7d5262d7..00000000 --- a/src/mcp_cli/commands/actions/prompts.py +++ /dev/null @@ -1,106 +0,0 @@ -# src/mcp_cli/commands/actions/prompts.py -""" -Prompts action for MCP CLI. - -List prompt templates from connected MCP servers. - -Public functions: -* **prompts_action_async()** - Async function to list prompts. -* **prompts_action()** - Sync wrapper for legacy CLI paths. 
-* **prompts_action_cmd()** - Alias for backward compatibility. -""" - -from __future__ import annotations - -import inspect - -from mcp_cli.utils.async_utils import run_blocking -from chuk_term.ui import output, format_table -from mcp_cli.context import get_context -from mcp_cli.commands.models import PromptInfoResponse - - -async def prompts_action_async() -> list[PromptInfoResponse]: - """ - Fetch and display prompt templates from all connected MCP servers. - - Returns: - List of prompt response models from all servers. - """ - context = get_context() - tm = context.tool_manager - - if not tm: - output.error("No tool manager available") - return [] - - try: - maybe = tm.list_prompts() - prompts = await maybe if inspect.isawaitable(maybe) else maybe - except Exception as exc: # noqa: BLE001 - output.error(f"{exc}") - return [] - - prompts = prompts or [] - if not prompts: - output.info("No prompts recorded.") - return [] - - # Convert to Pydantic models - prompt_models = [] - table_data = [] - columns = ["Server", "Name", "Description"] - - for item in prompts: - # Create Pydantic model - prompt_model = PromptInfoResponse( - name=item.get("name", "-"), - description=item.get("description"), - arguments=item.get("arguments", []), - server=item.get("server", "-"), - ) - prompt_models.append(prompt_model) - - # Build table row - table_data.append( - { - "Server": prompt_model.server, - "Name": prompt_model.name, - "Description": prompt_model.description or "-", - } - ) - - # Display table - table = format_table(table_data, title="Prompts", columns=columns) - output.print_table(table) - return prompt_models - - -def prompts_action() -> list[PromptInfoResponse]: - """ - Sync wrapper for prompts_action_async. - - Returns: - List of prompt response models from all servers. - - Raises: - RuntimeError: If called from inside an active event loop. 
- """ - return run_blocking(prompts_action_async()) - - -async def prompts_action_cmd() -> list[PromptInfoResponse]: - """ - Alias for prompts_action_async (backward compatibility). - - Returns: - List of prompt dictionaries from all servers. - """ - return await prompts_action_async() - - -__all__ = [ - "prompts_action_async", - "prompts_action", - "prompts_action_cmd", -] diff --git a/src/mcp_cli/commands/actions/providers.py b/src/mcp_cli/commands/actions/providers.py deleted file mode 100644 index b1a31fc8..00000000 --- a/src/mcp_cli/commands/actions/providers.py +++ /dev/null @@ -1,779 +0,0 @@ -# src/mcp_cli/commands/actions/providers.py -""" -Provider command with Pydantic models (no dict goop). -""" - -from __future__ import annotations -import subprocess -from mcp_cli.model_management import ModelManager -from chuk_term.ui import output, format_table -from mcp_cli.context import get_context, ApplicationContext -from mcp_cli.commands.models import ProviderActionParams -from mcp_cli.commands.models.provider import ( - ProviderData, - ProviderStatus, - TokenSource, -) -from mcp_cli.commands.enums import ProviderCommand - - -def _check_ollama_running() -> tuple[bool, int]: - """ - Check if Ollama is running and return status with model count. 
- Returns (is_running, model_count) - """ - try: - result = subprocess.run( - ["ollama", "list"], capture_output=True, text=True, timeout=5 - ) - if result.returncode == 0: - # Count actual models (skip header line and empty lines) - lines = result.stdout.strip().split("\n") - model_lines = [line for line in lines[1:] if line.strip()] - return True, len(model_lines) - return False, 0 - except (FileNotFoundError, subprocess.TimeoutExpired, Exception): - return False, 0 - - -def _dict_to_provider_data(provider_name: str, data: dict) -> ProviderData: - """Convert a dict to ProviderData model for compatibility.""" - if isinstance(data, ProviderData): - return data - - return ProviderData( - name=provider_name, - has_api_key=data.get("has_api_key", False), - token_source=TokenSource(data.get("token_source", "none")) - if data.get("token_source") in ["env", "storage", "none"] - else TokenSource.NONE, - models=data.get("models", data.get("available_models", [])), - default_model=data.get("default_model"), - baseline_features=data.get("baseline_features", []), - is_custom=data.get("is_custom", False), - api_base=data.get("api_base"), - discovery_enabled=data.get("discovery_enabled", False), - error=data.get("error"), - ) - - -def _get_provider_status_enhanced( - provider_name: str, provider_data: ProviderData -) -> ProviderStatus: - """ - Enhanced status logic that handles all provider types correctly. - Returns ProviderStatus with icon, text, and reason. 
- """ - # Handle Ollama specially - it doesn't need API keys - if provider_name.lower() == "ollama": - is_running, model_count = _check_ollama_running() - if is_running: - return ProviderStatus( - icon="✅", - text="Ready", - reason=f"Running ({model_count} models)", - ) - else: - return ProviderStatus( - icon="❌", - text="Not Running", - reason="Ollama service not accessible", - ) - - # For API-based providers, check configuration - if not provider_data.has_api_key: - return ProviderStatus( - icon="❌", - text="Not Configured", - reason="No API key", - ) - - # If has API key, check model availability - model_count = provider_data.model_count - - # Create status reason with token source info - source_info = "" - if provider_data.token_source == TokenSource.ENV: - source_info = " (env)" - elif provider_data.token_source == TokenSource.STORAGE: - source_info = " (storage)" - - if model_count == 0: - return ProviderStatus( - icon="⚠️", - text="Partial Setup", - reason=f"API key set but no models found{source_info}", - ) - - return ProviderStatus( - icon="✅", - text="Ready", - reason=f"Configured ({model_count} models){source_info}", - ) - - -def _get_model_count_display_enhanced( - provider_name: str, provider_data: ProviderData -) -> str: - """ - Enhanced model count display that handles Ollama and all providers correctly. 
- """ - # For Ollama, get live count from ollama command - if provider_name.lower() == "ollama": - is_running, live_count = _check_ollama_running() - if is_running: - return f"{live_count} models" - else: - return "Ollama not running" - - # For other providers, use the model count from ProviderData - count = provider_data.model_count - if count == 0: - return "No models found" - elif count == 1: - return "1 model" - else: - return f"{count} models" - - -def _get_features_display_enhanced(provider_data: ProviderData) -> str: - """Enhanced feature display with more comprehensive icons.""" - baseline_features = provider_data.baseline_features - - feature_icons = [] - if "streaming" in baseline_features: - feature_icons.append("📡") - if "tools" in baseline_features or "parallel_calls" in baseline_features: - feature_icons.append("🔧") - if "vision" in baseline_features: - feature_icons.append("👁️") - if "reasoning" in baseline_features: - feature_icons.append("🧠") - if "json_mode" in baseline_features: - feature_icons.append("📝") - - return "".join(feature_icons) if feature_icons else "📄" - - -def _render_list_optimized(model_manager: ModelManager) -> None: - """ - Optimized provider list using Pydantic models. - """ - # Create token manager for checking token sources - from mcp_cli.auth import TokenManager, TokenStoreBackend - from mcp_cli.constants import NAMESPACE - - try: - token_manager = TokenManager( - backend=TokenStoreBackend.AUTO, namespace=NAMESPACE, service_name="mcp-cli" - ) - except Exception: - token_manager = None - - table_data = [] - columns = [ - "Provider", - "Status", - "Token", - "Default Model", - "Models Available", - "Features", - ] - - current_provider = model_manager.get_active_provider() - - try: - # Get provider list - provider_names = model_manager.get_available_providers() - - if not provider_names: - output.error("No providers found. 
Check chuk-llm installation.") - return - - # Build provider info using Pydantic models - all_providers_data: dict[str, ProviderData] = {} - for provider_name in provider_names: - try: - models = model_manager.get_available_models(provider_name) - default_model = ( - model_manager.get_default_model(provider_name) if models else None - ) - all_providers_data[provider_name] = ProviderData( - name=provider_name, - models=models or [], - default_model=default_model, - ) - except Exception as e: - all_providers_data[provider_name] = ProviderData( - name=provider_name, - error=str(e), - ) - - except Exception as e: - output.error(f"Error getting provider list: {e}") - return - - # Sort providers to put current one first, then alphabetically - provider_items = list(all_providers_data.items()) - provider_items.sort(key=lambda x: (x[0] != current_provider, x[0])) - - for provider_name, provider_data in provider_items: - # Handle error cases - if provider_data.error: - table_data.append( - { - "Provider": provider_name, - "Status": "Error", - "Token": "-", - "Default Model": "-", - "Models Available": "-", - "Features": provider_data.error[:20] + "...", - } - ) - continue - - # Mark current provider - display_name = ( - f"→ {provider_name}" - if provider_name == current_provider - else f" {provider_name}" - ) - - # Check token source (chuk_llm doesn't provide this) - if provider_name.lower() != "ollama" and token_manager: - from mcp_cli.auth.provider_tokens import check_provider_token_status - - token_status = check_provider_token_status(provider_name, token_manager) - token_source_str = token_status.get("source", "none") - if token_source_str == "env": - provider_data.token_source = TokenSource.ENV - elif token_source_str == "storage": - provider_data.token_source = TokenSource.STORAGE - - # Enhanced status using Pydantic model - status = _get_provider_status_enhanced(provider_name, provider_data) - status_display = f"{status.icon} {status.text}" - - # Display token source 
- if provider_name.lower() == "ollama": - token_display = "-" - elif provider_data.token_source == TokenSource.ENV: - token_display = "🌍 env" - elif provider_data.token_source == TokenSource.STORAGE: - token_display = "🔐 storage" - else: - token_display = "❌ none" - - # Default model with proper fallback - default_model = provider_data.default_model or "-" - if default_model in ("None", "null"): - default_model = "-" - - # Enhanced model count display - models_display = _get_model_count_display_enhanced(provider_name, provider_data) - - # Enhanced features - features_display = _get_features_display_enhanced(provider_data) - - table_data.append( - { - "Provider": display_name, - "Status": status_display, - "Token": token_display, - "Default Model": default_model, - "Models Available": models_display, - "Features": features_display, - } - ) - - # Create and display table with visual styling - output.rule("[bold]🌐 Available Providers[/bold]", style="primary") - - table = format_table( - table_data, - title=None, # Using rule for title - columns=columns, - ) - output.print_table(table) - output.print() - - # Show comprehensive tips for provider management - output.tip("💡 Provider Management:") - output.info(" • Switch: /provider ") - output.info(" • Add custom: /provider add [model]") - output.info(" • Remove: /provider remove ") - output.info(" • List custom: /provider custom") - output.hint( - " Custom providers use env vars: {NAME}_API_KEY (e.g., LOCALAI_API_KEY)" - ) - - # Show helpful tips based on current state - inactive_providers = [] - inactive_custom_providers = [] - custom_count = 0 - for name, data in all_providers_data.items(): - if data.is_custom: - custom_count += 1 - if not data.error: - status = _get_provider_status_enhanced(name, data) - if status.icon == "❌": - inactive_providers.append(name) - if data.is_custom: - inactive_custom_providers.append(name) - - # Prioritize showing custom provider hints if any are unconfigured - if 
inactive_custom_providers: - # Show hint for custom provider - first_custom = inactive_custom_providers[0] - output.hint( - f"Configure API key: export {first_custom.upper().replace('-', '_')}_API_KEY=your-key" - ) - elif inactive_providers: - # Show hint for built-in provider - first_inactive = inactive_providers[0] - env_var_map = { - "anthropic": "ANTHROPIC_API_KEY", - "openai": "OPENAI_API_KEY", - "gemini": "GEMINI_API_KEY", - "groq": "GROQ_API_KEY", - "mistral": "MISTRAL_API_KEY", - "perplexity": "PERPLEXITY_API_KEY", - "openrouter": "OPENROUTER_API_KEY", - "togetherai": "TOGETHER_API_KEY", - "deepseek": "DEEPSEEK_API_KEY", - "azure_openai": "AZURE_OPENAI_API_KEY", - "watsonx": "WATSONX_API_KEY", - } - env_var = env_var_map.get(first_inactive, f"{first_inactive.upper()}_API_KEY") - output.hint(f"Configure API key: export {env_var}=your-key") - - # Suggest adding custom providers if none exist - if custom_count == 0: - output.hint( - "Add OpenAI-compatible providers: /provider add localai http://localhost:8080/v1" - ) - - -def _render_diagnostic_optimized( - model_manager: ModelManager, target: str | None -) -> None: - """Optimized diagnostic that shows detailed status for providers.""" - if target: - providers_to_test = [target] if model_manager.validate_provider(target) else [] - if not providers_to_test: - output.error(f"Unknown provider: {target}") - available = ", ".join(model_manager.get_available_providers()) - output.warning(f"Available providers: {available}") - return - else: - providers_to_test = model_manager.get_available_providers() - - table_data = [] - columns = ["Provider", "Status", "Models", "Features", "Details"] - - try: - all_providers_data = model_manager.get_available_providers() - except Exception as e: - output.error(f"Error getting provider data: {e}") - return - - for provider in providers_to_test: - try: - provider_info_dict = all_providers_data.get(provider, {}) # type: ignore[attr-defined] - - # Convert to ProviderData - 
provider_data = _dict_to_provider_data(provider, provider_info_dict) - - # Skip if provider has errors - if provider_data.error: - table_data.append( - { - "Provider": provider, - "Status": "Error", - "Models": "-", - "Features": "-", - "Details": provider_data.error[:30] + "...", - } - ) - continue - - # Enhanced status - status = _get_provider_status_enhanced(provider, provider_data) - status_display = f"{status.icon} {status.text}" - - # Model count - models_display = _get_model_count_display_enhanced(provider, provider_data) - - # Features - features_display = _get_features_display_enhanced(provider_data) - - # Additional details - details = [] - if provider_data.api_base: - details.append(f"API: {provider_data.api_base}") - if provider_data.discovery_enabled: - details.append("Discovery: ✅") - details_str = " | ".join(details) if details else "-" - - table_data.append( - { - "Provider": provider, - "Status": status_display, - "Models": models_display, - "Features": features_display, - "Details": details_str, - } - ) - - except Exception as exc: - table_data.append( - { - "Provider": provider, - "Status": "Error", - "Models": "-", - "Features": "-", - "Details": str(exc)[:30] + "...", - } - ) - - # Create and display table using chuk-term - table = format_table(table_data, title="Provider Diagnostics", columns=columns) - output.print_table(table) - - -def _switch_provider_enhanced( - model_manager: ModelManager, - provider_name: str, - model_name: str | None, - context: ApplicationContext, -) -> None: - """Enhanced provider switching with better validation and feedback.""" - - if not model_manager.validate_provider(provider_name): - available = ", ".join(model_manager.get_available_providers()) - output.error(f"Unknown provider: {provider_name}") - output.info(f"Available providers: {available}") - return - - # Get provider info for validation - try: - all_providers_info = model_manager.get_available_providers() - provider_info_dict = 
all_providers_info.get(provider_name, {}) # type: ignore[attr-defined] - - # Convert to ProviderData - provider_data = _dict_to_provider_data(provider_name, provider_info_dict) - - if provider_data.error: - output.error(f"Provider error: {provider_data.error}") - return - - # Enhanced status validation - status = _get_provider_status_enhanced(provider_name, provider_data) - - if status.icon == "❌": - output.error(f"Provider not ready: {status.reason}") - - # Provide specific help - if provider_name.lower() == "ollama": - output.tip("Start Ollama with: ollama serve") - elif "No API key" in status.reason: - env_var = f"{provider_name.upper()}_API_KEY" - output.tip( - f"Set API key with: /provider set {provider_name} api_key YOUR_KEY" - ) - output.tip(f"Or set environment variable: export {env_var}=YOUR_KEY") - - return - - elif status.icon == "⚠️": - output.warning(f"{status.reason}") - output.info("Continuing anyway...") - - except Exception as e: - output.warning(f"Could not validate provider: {e}") - - # Determine target model - if model_name: - target_model = model_name - else: - # Get default model - try: - target_model = model_manager.get_default_model(provider_name) - if not target_model: - # Fallback to first available model - available_models = model_manager.get_available_models(provider_name) - target_model = available_models[0] if available_models else "default" - except Exception: - target_model = "default" - - output.info(f"Switching to {provider_name} (model: {target_model})...") - - # Perform the switch - try: - model_manager.switch_model(provider_name, target_model) - except Exception as e: - output.error(f"Failed to switch provider: {e}") - return - - # Update context (ApplicationContext object) - try: - context.provider = provider_name - context.model = target_model - # context doesn't have a client attribute, but it has model_manager - context.model_manager = model_manager - except Exception as e: - output.warning(f"Could not update client context: 
{e}") - - output.success(f"✅ Switched to {provider_name} (model: {target_model})") - - -# Update the main action function with enhanced sub-commands -async def provider_action_async(params: ProviderActionParams) -> None: - """ - Enhanced provider action with all optimizations applied. - - Args: - params: Provider action parameters - - Example: - >>> params = ProviderActionParams(args=["list"], detailed=True) - >>> await provider_action_async(params) - """ - # Get context and model manager - context: ApplicationContext = get_context() - model_manager = context.model_manager - - if not model_manager: - output.error("Model manager not available") - return - - def _show_status() -> None: - provider = model_manager.get_active_provider() - model = model_manager.get_active_model() - - # Get enhanced status for current provider - try: - # Create a simple ProviderData for status check - provider_data = ProviderData(name=provider) - status = _get_provider_status_enhanced(provider, provider_data) - - # Display in a beautifully formatted panel - output.rule("[bold]🔧 Provider Status[/bold]", style="primary") - output.print() - - # Create visually appealing status display - output.print(f" [bold]Provider:[/bold] {provider}") - output.print(f" [bold]Model:[/bold] {model}") - output.print(f" [bold]Status:[/bold] {status.icon} {status.text}") - - if status.icon != "✅": - output.print() - output.warning(f" ⚠️ {status.reason}") - - output.print() - - # Show available providers tip - output.tip( - "Use: /provider to switch | /providers to list all | /provider set for config" - ) - - except Exception as e: - output.info(f"Current provider: {provider}") - output.info(f"Current model : {model}") - output.warning(f"Status check failed: {e}") - - def _format_features(status: dict) -> str: - features = [] - if status.get("supports_streaming"): - features.append("📡 streaming") - if status.get("supports_tools"): - features.append("🔧 tools") - if status.get("supports_vision"): - 
features.append("👁️ vision") - return " ".join(features) or "📄 text only" - - # Dispatch logic - if not params.args: - _show_status() - return - - sub, *rest = params.args - sub = sub.lower() - - if sub == ProviderCommand.LIST.value: - _render_list_optimized(model_manager) - return - - if sub == ProviderCommand.CUSTOM.value: - _list_custom_providers() - return - - if sub == ProviderCommand.ADD.value and len(rest) >= 2: - # /provider add [model1 model2 ...] - name = rest[0] - api_base = rest[1] - models = rest[2:] if len(rest) > 2 else None - _add_custom_provider(name, api_base, models) - return - - if sub == ProviderCommand.REMOVE.value and rest: - # /provider remove - name = rest[0] - _remove_custom_provider(name) - return - - if sub == ProviderCommand.CONFIG.value: - _render_config(model_manager) - return - - if sub == ProviderCommand.DIAGNOSTIC.value: - target = rest[0] if rest else None - _render_diagnostic_optimized(model_manager, target) - return - - if sub == ProviderCommand.SET.value and len(rest) >= 2: - provider_name, setting = rest[0], rest[1] - value = rest[2] if len(rest) >= 3 else None - _mutate(model_manager, provider_name, setting, value) - return - - # Provider switching - provider_name = sub - model_name = rest[0] if rest else None - _switch_provider_enhanced(model_manager, provider_name, model_name, context) - - -# Keep existing helper functions but use them in the enhanced versions above -def _render_config(model_manager: ModelManager) -> None: - """Show detailed configuration - keeping your existing implementation.""" - # ... existing implementation - pass - - -def _add_custom_provider( - name: str, api_base: str, models: list[str] | None = None -) -> None: - """Add a custom OpenAI-compatible provider.""" - from mcp_cli.utils.preferences import get_preference_manager - import os - - prefs = get_preference_manager() - - # Check if provider already exists - if prefs.is_custom_provider(name): - output.error(f"Provider '{name}' already exists. 
Use 'update' to modify it.") - return - - # Add the provider - prefs.add_custom_provider( - name=name, - api_base=api_base, - models=models or ["gpt-4", "gpt-3.5-turbo"], - default_model=models[0] if models else "gpt-4", - ) - - # Get the expected env var name - env_var = f"{name.upper().replace('-', '_')}_API_KEY" - - output.success(f"✅ Added provider '{name}'") - output.info(f" API Base: {api_base}") - output.info(f" Models: {', '.join(models or ['gpt-4', 'gpt-3.5-turbo'])}") - - # Check if API key is set - if not os.environ.get(env_var): - output.warning("\n⚠️ API key required. Set it with:") - output.print(f" [bold]export {env_var}=your-api-key[/bold]") - output.info( - "\n The environment variable name is based on your provider name:" - ) - output.info(f" Provider '{name}' → {env_var}") - else: - output.success(f" API Key: ✅ Found in {env_var}") - - -def _remove_custom_provider(name: str) -> None: - """Remove a custom provider.""" - from mcp_cli.utils.preferences import get_preference_manager - - prefs = get_preference_manager() - - if not prefs.is_custom_provider(name): - output.error(f"Provider '{name}' is not a custom provider or doesn't exist.") - return - - if prefs.remove_custom_provider(name): - output.success(f"✅ Removed provider '{name}'") - else: - output.error(f"Failed to remove provider '{name}'") - - -def _list_custom_providers() -> None: - """List all custom providers.""" - from mcp_cli.utils.preferences import get_preference_manager - from mcp_cli.auth.provider_tokens import get_provider_token_display_status - from mcp_cli.auth import TokenManager - - prefs = get_preference_manager() - custom_providers = prefs.get_custom_providers() - - if not custom_providers: - output.info("No custom providers configured.") - output.tip("Add one with: /provider add [models...]") - return - - output.rule("[bold]🔧 Custom Providers[/bold]", style="primary") - - try: - token_manager = TokenManager() - except Exception: - token_manager = None - - table_data = [] - 
for name, provider_data in custom_providers.items(): - # Handle custom env var name if specified - custom_env_var = provider_data.get("env_var_name") - if custom_env_var: - # Use custom env var name instead of default - import os - - if os.environ.get(custom_env_var): - token_status = f"✅ {custom_env_var}" - else: - token_status = f"❌ {custom_env_var} not set" - else: - # Use standard hierarchical token status display - token_status = get_provider_token_display_status(name, token_manager) - - table_data.append( - { - "Provider": name, - "API Base": provider_data["api_base"], - "Models": ", ".join(provider_data.get("models", [])), - "Default": provider_data.get("default_model", "gpt-4"), - "Token": token_status, - } - ) - - table = format_table( - table_data, - title=None, - columns=["Provider", "API Base", "Models", "Default", "Token"], - ) - output.print_table(table) - - -def _mutate( - model_manager: ModelManager, provider: str, key: str, value: str | None -) -> None: - """Update provider configuration - keeping your existing implementation.""" - # ... existing implementation - pass - - -# Sync wrapper -def provider_action(args: list[str]) -> None: - """Sync wrapper for provider_action_async.""" - from mcp_cli.utils.async_utils import run_blocking - - params = ProviderActionParams(args=args) - run_blocking(provider_action_async(params)) diff --git a/src/mcp_cli/commands/actions/resources.py b/src/mcp_cli/commands/actions/resources.py deleted file mode 100644 index 4e4cb542..00000000 --- a/src/mcp_cli/commands/actions/resources.py +++ /dev/null @@ -1,108 +0,0 @@ -# src/mcp_cli/commands/actions/resources.py -""" -Resources action for MCP CLI. - -List resources (files, blobs, artifacts) from connected MCP servers. - -Public functions: -* **resources_action_async()** - Async function to list resources. -* **resources_action()** - Sync wrapper for legacy CLI paths. 
-""" - -from __future__ import annotations - -import inspect - -from mcp_cli.utils.async_utils import run_blocking -from chuk_term.ui import output, format_table -from mcp_cli.context import get_context -from mcp_cli.commands.models import ResourceInfoResponse - - -def _human_size(size: int | None) -> str: - """Convert size in bytes to human-readable string (KB/MB/GB).""" - if size is None or size < 0: - return "-" - current_size: float = float(size) - for unit in ("B", "KB", "MB", "GB"): - if current_size < 1024: - return f"{current_size:.0f} {unit}" - current_size = current_size / 1024 - return f"{current_size:.1f} TB" - - -async def resources_action_async() -> list[ResourceInfoResponse]: - """ - Fetch and display resources from all connected MCP servers. - - Returns: - List of resource response models from all servers. - """ - context = get_context() - tm = context.tool_manager - - if not tm: - output.error("No tool manager available") - return [] - - try: - maybe = tm.list_resources() - resources = await maybe if inspect.isawaitable(maybe) else maybe - except Exception as exc: # noqa: BLE001 - output.error(f"{exc}") - return [] - - resources = resources or [] - if not resources: - output.info("No resources recorded.") - return [] - - # Convert to Pydantic models - resource_models = [] - table_data = [] - columns = ["Server", "URI", "Size", "MIME-type"] - - for item in resources: - # Create Pydantic model - resource_model = ResourceInfoResponse( - uri=item.get("uri", "-"), - name=item.get("name"), - description=item.get("description"), - mime_type=item.get("mimeType"), - server=item.get("server", "-"), - ) - resource_models.append(resource_model) - - # Build table row - table_data.append( - { - "Server": resource_model.server, - "URI": resource_model.uri, - "Size": _human_size(item.get("size")), - "MIME-type": resource_model.mime_type or "-", - } - ) - - # Display table - table = format_table(table_data, title="Resources", columns=columns) - 
output.print_table(table) - return resource_models - - -def resources_action() -> list[ResourceInfoResponse]: - """ - Sync wrapper for resources_action_async. - - Returns: - List of resource dictionaries from all servers. - - Raises: - RuntimeError: If called from inside an active event loop. - """ - return run_blocking(resources_action_async()) - - -__all__ = [ - "resources_action_async", - "resources_action", -] diff --git a/src/mcp_cli/commands/actions/servers.py b/src/mcp_cli/commands/actions/servers.py deleted file mode 100644 index 67e1649a..00000000 --- a/src/mcp_cli/commands/actions/servers.py +++ /dev/null @@ -1,781 +0,0 @@ -# src/mcp_cli/commands/actions/servers.py -""" -Servers action for MCP CLI. - -List and manage MCP servers with runtime configuration support. -Integrates with preference manager for user servers (~/.mcp-cli) -and config manager for project servers (server_config.json). -""" - -from __future__ import annotations - -import json -import time -from typing import Any - -from mcp_cli.utils.async_utils import run_blocking -from chuk_term.ui import output, format_table -from mcp_cli.context import get_context -from mcp_cli.config.config_manager import ConfigManager, ServerConfig -from mcp_cli.utils.preferences import get_preference_manager -from mcp_cli.commands.models import ( - ServerActionParams, - ServerInfoResponse, - ServerCapabilities, -) -from mcp_cli.commands.enums import ServerCommand, TransportType - - -def _get_server_icon( - capabilities: dict[str, Any] | ServerCapabilities, tool_count: int -) -> str: - """Determine server icon based on MCP capabilities.""" - # Convert dict to ServerCapabilities if needed - if isinstance(capabilities, dict): - caps = ServerCapabilities(**capabilities) - else: - caps = capabilities - - if caps.resources and caps.prompts: - return "🎯" # Full-featured server - elif caps.resources: - return "📁" # Resource-capable server - elif caps.prompts: - return "💬" # Prompt-capable server - elif tool_count > 15: 
- return "🔧" # Tool-heavy server - elif tool_count > 0: - return "⚙️" # Basic tool server - else: - return "📦" # Minimal server - - -def _format_performance(ping_ms: float | None) -> tuple[str, str]: - """Format performance metrics with color coding.""" - if ping_ms is None: - return "❓", "Unknown" - - if ping_ms < 10: - return "🚀", f"{ping_ms:.1f}ms" - elif ping_ms < 50: - return "✅", f"{ping_ms:.1f}ms" - elif ping_ms < 100: - return "⚠️", f"{ping_ms:.1f}ms" - else: - return "🔴", f"{ping_ms:.1f}ms" - - -def _format_capabilities(capabilities: dict[str, Any] | ServerCapabilities) -> str: - """Format server capabilities as readable string.""" - # Convert dict to ServerCapabilities if needed - if isinstance(capabilities, dict): - caps_model = ServerCapabilities(**capabilities) - else: - caps_model = capabilities - - return caps_model.to_display_string() - - -def _get_server_status( - server_config: ServerConfig | dict[str, Any], connected: bool = False -) -> tuple[str, str, str]: - """ - Get server status. 
- Returns (status_icon, status_text, status_reason) - """ - # Handle both ServerConfig objects and dicts - if isinstance(server_config, ServerConfig): - disabled = server_config.disabled - has_command = server_config.command is not None - has_url = server_config.url is not None - command = server_config.command - url = server_config.url - transport = "http" if has_url else "stdio" - else: - disabled = server_config.get("disabled", False) - has_command = server_config.get("command") is not None - has_url = server_config.get("url") is not None - command = server_config.get("command") - url = server_config.get("url") - transport = server_config.get("transport", "http") - - if disabled: - return "⏸️", "Disabled", "Server is disabled" - - if connected: - return "✅", "Connected", "Server is active" - - # Check if server config is valid - if not has_command and not has_url: - return "❌", "Not Configured", "No command or URL specified" - - if has_url: - # HTTP/SSE server - return "🌐", transport.upper(), f"URL: {url}" - elif has_command: - # STDIO server - return "📡", "STDIO", f"Command: {command}" - - return "❓", "Unknown", "Unknown server type" - - -async def _list_servers(show_all: bool = False) -> None: - """ - List all servers from both config file and runtime preferences. 
- - Args: - show_all: Show all servers including disabled ones - """ - context = get_context() - tm = context.tool_manager - pref_manager = get_preference_manager() - config_manager = ConfigManager() - - # Get project servers from config file - try: - config = config_manager.get_config() - except RuntimeError: - config = config_manager.initialize() - - # Get runtime servers from preferences - runtime_servers = pref_manager.get_runtime_servers() - - # Get connected servers from tool manager - connected_servers = [] - if tm: - try: - connected_servers = ( - await tm.get_server_info() if hasattr(tm, "get_server_info") else [] - ) - except Exception: - pass - - # Track which servers are connected - connected_names = {s.name.lower() for s in connected_servers} - - # Build combined server list - table_data = [] - columns = ["Server", "Status", "Transport", "Tools", "Source"] - - # Add project servers from config file - for name, server_config in config.servers.items(): - if not show_all and server_config.disabled: - continue - - is_connected = name.lower() in connected_names - - # Get connection info if available - tool_count = 0 - if is_connected: - for cs in connected_servers: - if cs.name.lower() == name.lower(): - tool_count = cs.tool_count - break - - # Convert ServerConfig to dict for status check - server_dict = { - "command": server_config.command, - "url": server_config.url, - "disabled": server_config.disabled, - "transport": server_config.transport, - } - - status_icon, status_text, _ = _get_server_status(server_dict, is_connected) - display_name = f"→ {name}" if is_connected else f" {name}" - - table_data.append( - { - "Server": display_name, - "Status": f"{status_icon} {status_text}", - "Transport": server_config.transport.upper(), - "Tools": str(tool_count) if tool_count > 0 else "-", - "Source": "Config", - } - ) - - # Add runtime servers from preferences - for name, server_dict in runtime_servers.items(): - # Check if disabled via preferences - if not 
show_all and pref_manager.is_server_disabled(name): - continue - - is_connected = name.lower() in connected_names - - # Get connection info if available - tool_count = 0 - if is_connected: - for cs in connected_servers: - if cs.name.lower() == name.lower(): - tool_count = cs.tool_count - break - - # Add disabled flag if needed - server_dict["disabled"] = pref_manager.is_server_disabled(name) - - status_icon, status_text, _ = _get_server_status(server_dict, is_connected) - display_name = f"→ {name}" if is_connected else f" {name}" - - # Determine transport - if server_dict.get("url"): - transport = str(server_dict.get("transport", "HTTP")) - else: - transport = "STDIO" - - table_data.append( - { - "Server": display_name, - "Status": f"{status_icon} {status_text}", - "Transport": transport.upper(), - "Tools": str(tool_count) if tool_count > 0 else "-", - "Source": "User", - } - ) - - # Add any connected servers not in either config or runtime - for server in connected_servers: - server_lower = server.name.lower() - if server_lower not in { - n.lower() for n in config.servers.keys() - } and server_lower not in {n.lower() for n in runtime_servers.keys()}: - table_data.append( - { - "Server": f"→ {server.name}", - "Status": "✅ Connected", - "Transport": server.transport.upper(), - "Tools": str(server.tool_count), - "Source": "Active", - } - ) - - if not table_data: - output.info("No servers configured.") - output.tip("Add a server with: /server add stdio [args...]") - output.tip("Or: /server add http ") - return - - # Display table - output.rule("[bold]🔌 MCP Servers[/bold]", style="primary") - - table = format_table( - table_data, - title=None, - columns=columns, - ) - output.print_table(table) - output.print() - - # Show management tips - output.tip("💡 Server Management:") - output.info(" • Add: /server add stdio [args...]") - output.info(" • Add: /server add --transport http ") - output.info(" • Remove: /server remove ") - output.info(" • Enable/Disable: /server 
enable|disable ") - - # Show counts - config_count = len(config.servers) - runtime_count = len(runtime_servers) - if config_count or runtime_count: - output.info( - f"\nServers: {config_count} from config, {runtime_count} user-added" - ) - - -async def _add_server( - name: str, - transport: str, - config_args: list[str], - env_vars: dict[str, str] | None = None, - headers: dict[str, str] | None = None, -) -> None: - """ - Add a new MCP server to user preferences (~/.mcp-cli). - - Args: - name: Server name - transport: Transport type (stdio, http, or sse) - config_args: Configuration arguments (command+args for stdio, url for http/sse) - env_vars: Optional environment variables - headers: Optional headers for HTTP/SSE servers - """ - pref_manager = get_preference_manager() - - # Check if server already exists - if pref_manager.get_runtime_server(name): - output.error( - f"Server '{name}' already exists. Use '/server update' to modify it." - ) - return - - # Check in config file too - config_manager = ConfigManager() - try: - config = config_manager.get_config() - if name in config.servers: - output.error( - f"Server '{name}' exists in project config. Remove it first or use a different name." 
- ) - return - except RuntimeError: - pass - - # Build server configuration as a dictionary for preferences - server_config: dict[str, Any] = {"transport": transport.lower()} - - if transport.lower() == "stdio": - if not config_args: - output.error("STDIO server requires a command") - return - - server_config["command"] = config_args[0] - if len(config_args) > 1: - server_config["args"] = config_args[1:] - if env_vars: - server_config["env"] = env_vars - - elif transport.lower() in ["http", "sse"]: - if not config_args: - output.error(f"{transport.upper()} server requires a URL") - return - - server_config["url"] = config_args[0] - server_config["transport"] = transport.lower() - - # Store environment variables and headers - if env_vars: - server_config["env"] = env_vars - if headers: - server_config["headers"] = headers - else: - output.error(f"Unknown transport type: {transport}") - output.info("Valid types: stdio, http, sse") - return - - # Save to preferences - pref_manager.add_runtime_server(name, server_config) - - output.success(f"✅ Added server '{name}' to user configuration") - output.info(f" Transport: {transport.upper()}") - - if transport.lower() == "stdio": - output.info(f" Command: {server_config['command']}") - if server_config.get("args"): - output.info(f" Args: {' '.join(server_config['args'])}") - else: - output.info(f" URL: {server_config['url']}") - - if env_vars: - output.info(f" Environment: {', '.join(env_vars.keys())}") - - if headers: - output.info(f" Headers: {', '.join(headers.keys())}") - - output.tip( - "Restart the chat session or use '/server reload' to connect to the new server" - ) - - -async def _remove_server(name: str) -> None: - """Remove a server from user preferences or project config.""" - pref_manager = get_preference_manager() - - # Try to remove from runtime servers first - if pref_manager.remove_runtime_server(name): - output.success(f"✅ Removed server '{name}' from user configuration") - output.tip("Restart the chat 
session to apply changes") - return - - # Try project config - config_manager = ConfigManager() - try: - config = config_manager.get_config() - except RuntimeError: - config = config_manager.initialize() - - if name in config.servers: - output.warning(f"Server '{name}' is in project configuration.") - output.info("To remove it, edit server_config.json directly") - return - - output.error(f"Server '{name}' not found") - - # Show available servers - runtime_servers = pref_manager.get_runtime_servers() - all_servers = list(runtime_servers.keys()) + list(config.servers.keys()) - if all_servers: - output.info(f"Available servers: {', '.join(all_servers)}") - - -async def _enable_disable_server(name: str, enable: bool) -> None: - """Enable or disable a server in preferences.""" - pref_manager = get_preference_manager() - - # Check if server exists - runtime_server = pref_manager.get_runtime_server(name) - - # Also check project config - config_manager = ConfigManager() - try: - config = config_manager.get_config() - project_server = config.get_server(name) - except RuntimeError: - config = config_manager.initialize() - project_server = None - - if not runtime_server and not project_server: - output.error(f"Server '{name}' not found") - return - - # Update preference - if enable: - pref_manager.enable_server(name) - output.success(f"✅ Server '{name}' enabled") - else: - pref_manager.disable_server(name) - output.success(f"✅ Server '{name}' disabled") - - output.tip("Restart the chat session to apply changes") - - -async def _show_server_details(name: str) -> None: - """Show detailed information about a server.""" - pref_manager = get_preference_manager() - config_manager = ConfigManager() - - # Check runtime servers - server_config = pref_manager.get_runtime_server(name) - source = "User" - - # Check project config if not found - if not server_config: - try: - config = config_manager.get_config() - project_server = config.get_server(name) - if project_server: - 
server_config = { - "command": project_server.command, - "args": project_server.args, - "env": project_server.env, - "url": project_server.url, - "transport": project_server.transport, - "disabled": project_server.disabled, - } - source = "Config" - except RuntimeError: - pass - - if not server_config: - # Try to show details for a connected server - context = get_context() - tm = context.tool_manager - if tm: - try: - servers = ( - await tm.get_server_info() if hasattr(tm, "get_server_info") else [] - ) - for server in servers: - if server.name.lower() == name.lower(): - await _show_connected_server_details(server) - return - except Exception: - pass - - output.error(f"Server '{name}' not found") - return - - # Display detailed info - is_disabled = pref_manager.is_server_disabled(name) or server_config.get( - "disabled", False - ) - icon = "⏸️" if is_disabled else "✅" - - output.rule(f"[bold]{icon} Server: {name}[/bold]", style="primary") - output.print() - - output.print(f" [bold]Source:[/bold] {source}") - output.print( - f" [bold]Status:[/bold] {'Disabled' if is_disabled else 'Enabled'}" - ) - output.print( - f" [bold]Transport:[/bold] {server_config.get('transport', 'stdio').upper()}" - ) - - if server_config.get("command"): - output.print(f" [bold]Command:[/bold] {server_config['command']}") - if server_config.get("args"): - output.print( - f" [bold]Arguments:[/bold] {' '.join(server_config['args'])}" - ) - - if server_config.get("url"): - output.print(f" [bold]URL:[/bold] {server_config['url']}") - - if server_config.get("env"): - output.print(" [bold]Environment:[/bold]") - for key, value in server_config["env"].items(): - # Mask sensitive values - display_value = ( - "***" if "key" in key.lower() or "token" in key.lower() else value - ) - output.print(f" • {key}={display_value}") - - if server_config.get("headers"): - output.print(" [bold]Headers:[/bold]") - for key, value in server_config["headers"].items(): - # Mask sensitive headers - display_value = ( 
- "***" - if "auth" in key.lower() or "token" in key.lower() - else value[:20] + "..." - ) - output.print(f" • {key}: {display_value}") - - output.print() - - # Show management options - if is_disabled: - output.tip(f"Enable with: /server enable {name}") - else: - output.tip(f"Disable with: /server disable {name}") - - if source == "User": - output.tip(f"Remove with: /server remove {name}") - - -async def _show_connected_server_details(server) -> None: - """Show details for a connected server.""" - icon = _get_server_icon(server.capabilities, server.tool_count) - - output.rule(f"[bold]{icon} Server: {server.name}[/bold]", style="primary") - output.print() - - output.print(f" [bold]Transport:[/bold] {server.transport}") - output.print(" [bold]Status:[/bold] ✅ Connected") - output.print(f" [bold]Tools:[/bold] {server.tool_count} available") - output.print( - f" [bold]Capabilities:[/bold] {_format_capabilities(server.capabilities)}" - ) - - output.print() - output.tip("💡 Use: /servers to list all servers | /tools to see available tools") - - -async def servers_action_async(params: ServerActionParams) -> list[ServerInfoResponse]: - """ - MCP server management action. - - Args: - params: Server action parameters - - Example: - >>> params = ServerActionParams(detailed=True, ping_servers=True) - >>> await servers_action_async(params) - - Supports: - - /servers or /server list - List all servers - - /server add stdio [args...] 
- Add STDIO server - - /server add --transport http - Add HTTP/SSE server - - /server remove - Remove server - - /server enable - Enable server - - /server disable - Disable server - - /server - Show server details - """ - - # Handle command-style invocation with args - if params.args: - if not params.args: - await _list_servers() - return [] - - sub, *rest = params.args - sub = sub.lower() - - # List servers - if sub == ServerCommand.LIST.value: - show_all = bool(rest and rest[0].lower() == "all") - await _list_servers(show_all) - return [] - - # Add server with support for --transport, --env, --header - if sub == ServerCommand.ADD.value and len(rest) >= 2: - name = rest[0] - - # Parse options - transport = TransportType.STDIO.value # default - config_args = [] - env_vars = {} - headers = {} - - i = 1 - while i < len(rest): - arg = rest[i] - - if arg == "--transport" and i + 1 < len(rest): - transport = rest[i + 1] - i += 2 - elif arg == "--env" and i + 1 < len(rest): - env_str = rest[i + 1] - if "=" in env_str: - key, value = env_str.split("=", 1) - env_vars[key] = value - i += 2 - elif arg == "--header" and i + 1 < len(rest): - header_str = rest[i + 1] - if ":" in header_str: - key, value = header_str.split(":", 1) - headers[key.strip()] = value.strip() - elif "=" in header_str: - key, value = header_str.split("=", 1) - headers[key.strip()] = value.strip() - i += 2 - elif arg == "--": - # Everything after -- is the command/URL and args - config_args = rest[i + 1 :] - break - else: - # First non-option arg could be transport - if i == 1 and arg in [ - TransportType.STDIO.value, - TransportType.HTTP.value, - TransportType.SSE.value, - ]: - transport = arg - i += 1 - else: - config_args = rest[i:] - break - - await _add_server(name, transport, config_args, env_vars, headers) - return [] - - # Remove server - if sub == ServerCommand.REMOVE.value and rest: - await _remove_server(rest[0]) - return [] - - # Enable server - if sub == ServerCommand.ENABLE.value and rest: 
- await _enable_disable_server(rest[0], True) - return [] - - # Disable server - if sub == ServerCommand.DISABLE.value and rest: - await _enable_disable_server(rest[0], False) - return [] - - # Show server details - await _show_server_details(sub) - return [] - - # Original functionality for backwards compatibility - context = get_context() - tm = context.tool_manager - - if not tm: - output.error("No tool manager available") - return [] - - # Get server information - try: - servers = await tm.get_server_info() if hasattr(tm, "get_server_info") else [] - except Exception as e: - output.error(f"Failed to get server info: {e}") - return [] - - if not servers: - output.info("No servers connected.") - output.tip("Add a server with: /server add stdio [args...]") - return [] - - # Process server data - server_data: list[ServerInfoResponse] = [] - for idx, server in enumerate(servers): - # ServerInfo is a dataclass with these attributes - name = server.name - transport = server.transport - capabilities = server.capabilities - tool_count = server.tool_count - status = server.display_status - - # Ping if requested - ping_ms = None - if params.ping_servers: - try: - start = time.perf_counter() - if hasattr(tm, "ping_server"): - await tm.ping_server(idx) - ping_ms = (time.perf_counter() - start) * 1000 - except Exception: - ping_ms = None - - # Build clean server info response model - info = ServerInfoResponse( - name=name, - transport=transport, - capabilities=capabilities, - tool_count=tool_count, - status=status, - ping_ms=ping_ms, - ) - - server_data.append(info) - - # Output based on format - if params.output_format == "json": - # Convert Pydantic models to dicts for JSON serialization - output.print(json.dumps([s.model_dump() for s in server_data], indent=2)) - else: - # Build table - columns = ["Icon", "Server", "Transport", "Tools", "Capabilities"] - if params.ping_servers: - columns.append("Ping") - - table_data: list[dict[str, Any]] = [] - for server_info in 
server_data: - icon = _get_server_icon(server_info.capabilities, server_info.tool_count) - row: dict[str, Any] = { - "Icon": icon, - "Server": server_info.name, - "Transport": server_info.transport, - "Tools": str(server_info.tool_count), - "Capabilities": _format_capabilities(server_info.capabilities), - } - - if params.ping_servers: - perf_icon, perf_text = _format_performance(server_info.ping_ms) - row["Ping"] = f"{perf_icon} {perf_text}" - - table_data.append(row) - - # Display table with themed styling - output.rule("[bold]Connected MCP Servers[/bold]", style="primary") - - table = format_table( - table_data, - title=None, - columns=columns, - ) - output.print_table(table) - output.print() - - output.tip( - "💡 Use: /server for details | /server add stdio " - ) - - return server_data - - -def servers_action(**kwargs) -> list[ServerInfoResponse]: - """ - Sync wrapper for servers_action_async. - - Returns: - List of server information dictionaries. - """ - params = ServerActionParams(**kwargs) - return run_blocking(servers_action_async(params)) - - -__all__ = [ - "servers_action_async", - "servers_action", -] diff --git a/src/mcp_cli/commands/actions/theme.py b/src/mcp_cli/commands/actions/theme.py deleted file mode 100644 index 24e1cd25..00000000 --- a/src/mcp_cli/commands/actions/theme.py +++ /dev/null @@ -1,194 +0,0 @@ -# src/mcp_cli/commands/actions/theme.py -"""Theme command for direct CLI access.""" - -import asyncio - -from chuk_term.ui import output, format_table -from chuk_term.ui.theme import set_theme -from chuk_term.ui.prompts import ask - -from mcp_cli.utils.preferences import get_preference_manager, Theme -from mcp_cli.commands.models import ThemeActionParams - - -def theme_command( - theme_name: str | None = None, - list_themes: bool = False, - select: bool = False, -) -> None: - """Manage UI themes for MCP CLI. 
- - Args: - theme_name: Name of theme to switch to - list_themes: Show all available themes - select: Interactive theme selection - """ - pref_manager = get_preference_manager() - - # List themes - if list_themes or (not theme_name and not select): - current = pref_manager.get_theme() - output.rule("Available Themes") - - themes = [t.value for t in Theme] - theme_descriptions = { - "default": "Balanced colors for all terminals", - "dark": "Dark mode with muted colors", - "light": "Light mode with bright colors", - "minimal": "Minimal color usage", - "terminal": "Uses terminal's default colors", - "monokai": "Popular dark theme from code editors", - "dracula": "Dark theme with purple accents", - "solarized": "Precision colors for readability", - } - - for theme in themes: - desc = theme_descriptions.get(theme, "") - if theme == current: - output.info(f"• {theme} (current) - {desc}") - else: - output.print(f"• {theme} - {desc}") - - output.print() - # Show appropriate hint based on context (chat mode uses /) - output.hint("Use '/theme ' to switch themes") - return - - # Interactive selection - if select: - asyncio.run(_interactive_theme_selection(pref_manager)) - return - - # Switch to specific theme - if theme_name: - valid_themes = [t.value for t in Theme] - if theme_name.lower() in valid_themes: - try: - # Apply theme immediately - set_theme(theme_name.lower()) - - # Save preference - pref_manager.set_theme(theme_name.lower()) - - output.success(f"Theme switched to: {theme_name.lower()}") - output.print("\nTheme saved to your preferences.") - - except Exception as e: - output.error(f"Failed to switch theme: {e}") - else: - output.error(f"Invalid theme: {theme_name}") - output.hint(f"Valid themes are: {', '.join(valid_themes)}") - output.hint("Use '/theme' to see all themes") - - -async def _interactive_theme_selection(pref_manager): - """Interactive theme selection with preview.""" - themes = [t.value for t in Theme] - current = pref_manager.get_theme() - - # 
Theme descriptions - theme_descriptions = { - "default": "Balanced colors for all terminals", - "dark": "Dark mode with muted colors", - "light": "Light mode with bright colors", - "minimal": "Minimal color usage", - "terminal": "Uses terminal's default colors", - "monokai": "Popular dark theme from code editors", - "dracula": "Dark theme with purple accents", - "solarized": "Precision colors for readability", - } - - # Display themes in a nice table format - output.rule("Theme Selector") - - # Build table data for chuk_term - table_data = [] - columns = ["#", "Theme", "Description", "Status"] - - for idx, theme in enumerate(themes, 1): - desc = theme_descriptions.get(theme, "") - status = "✓ Current" if theme == current else "" - - table_data.append( - {"#": str(idx), "Theme": theme, "Description": desc, "Status": status} - ) - - # Create and display table using chuk-term - table = format_table(table_data, title="Available Themes", columns=columns) - output.print_table(table) - output.print() - - # Get numeric or name input - current_idx = themes.index(current) + 1 if current in themes else 1 - response = ask( - "Enter theme number (1-8) or name:", default=str(current_idx), show_default=True - ) - - # Parse the response - could be number or theme name - theme_name = None - if response.isdigit(): - idx = int(response) - if 1 <= idx <= len(themes): - theme_name = themes[idx - 1] - else: - output.error(f"Invalid selection: {idx}. 
Please choose 1-{len(themes)}") - return - else: - # Try to match theme name - response_lower = response.lower() - for theme in themes: - if theme.lower() == response_lower: - theme_name = theme - break - - if not theme_name: - output.error(f"Unknown theme: {response}") - output.hint(f"Valid themes: {', '.join(themes)}") - return - - if theme_name and theme_name != current: - # Apply and save theme - set_theme(theme_name) - pref_manager.set_theme(theme_name) - - output.print() # Add spacing - output.rule(f"Theme: {theme_name}") - output.success(f"Theme switched to: {theme_name}") - output.print() - - # Show preview - _show_theme_preview() - output.print() - output.print("The new theme has been saved to your preferences.") - elif theme_name == current: - output.info(f"Already using theme: {theme_name}") - - -def _show_theme_preview(): - """Show a preview of the current theme.""" - output.print("Theme Preview:") - output.info("Information message") - output.success("Success message") - output.warning("Warning message") - output.error("Error message") - output.hint("Hint message") - - -async def theme_action_async(params: ThemeActionParams) -> None: - """ - Async wrapper for theme command. 
- - Args: - params: Theme action parameters - - Example: - >>> params = ThemeActionParams(theme_name="dracula") - >>> await theme_action_async(params) - """ - # Convert to sync call - if params.list_themes or not params.theme_name: - # No theme name = list themes - theme_command(list_themes=True) - else: - # Set the theme - theme_command(theme_name=params.theme_name) diff --git a/src/mcp_cli/commands/actions/token.py b/src/mcp_cli/commands/actions/token.py deleted file mode 100644 index c81f0168..00000000 --- a/src/mcp_cli/commands/actions/token.py +++ /dev/null @@ -1,753 +0,0 @@ -"""Token management actions for MCP CLI.""" - -from __future__ import annotations - -import json - -from chuk_term.ui import output, format_table -from mcp_cli.auth import TokenManager -from mcp_cli.auth import TokenStoreBackend, TokenStoreFactory -from mcp_cli.auth import APIKeyToken, BearerToken, TokenType -from mcp_cli.config.config_manager import get_config -from mcp_cli.constants import NAMESPACE, OAUTH_NAMESPACE -from mcp_cli.commands.models import ( - TokenListParams, - TokenSetParams, - TokenDeleteParams, - TokenClearParams, - TokenProviderParams, -) - - -def _get_token_manager() -> TokenManager: - """Get configured token manager instance with mcp-cli namespace.""" - import os - - # Check for CLI override first - backend_override = os.environ.get("MCP_CLI_TOKEN_BACKEND") - if backend_override: - try: - backend = TokenStoreBackend(backend_override) - except (ValueError, KeyError): - # Invalid backend specified, fall through to config - backend = None - else: - backend = None - - # If no override or invalid override, check config - if backend is None: - try: - config = get_config() - backend = TokenStoreBackend(config.token_store_backend) - except Exception: - backend = TokenStoreBackend.AUTO - - return TokenManager(backend=backend, namespace=NAMESPACE, service_name="mcp-cli") - - -async def token_list_action_async(params: TokenListParams) -> None: - """ - List all stored tokens 
(metadata only, no sensitive data). - - Args: - params: Token list parameters - - Example: - >>> params = TokenListParams(namespace="provider", show_oauth=True) - >>> await token_list_action_async(params) - """ - try: - manager = _get_token_manager() - - output.rule("[bold]🔐 Stored Tokens[/bold]", style="primary") - - # Track if we showed any tokens at all - provider_tokens = {} - oauth_entries = [] - - # Show provider tokens with hierarchical status - if params.show_providers and ( - params.namespace is None or params.namespace == "provider" - ): - from mcp_cli.auth.provider_tokens import ( - list_all_provider_tokens, - ) - - provider_tokens = list_all_provider_tokens(manager) - - if provider_tokens: - output.print( - "\n[bold]Provider API Keys (Stored in Secure Storage):[/bold]" - ) - provider_table_data = [] - - for provider_name, status_info in provider_tokens.items(): - env_var = status_info["env_var"] - - # These are all storage tokens - status_display = "🔐 storage" - - # Note if env var also exists (shows hierarchy) - if status_info["in_env"]: - note = f"(overridden by {env_var})" - else: - note = "active" - - provider_table_data.append( - { - "Provider": provider_name, - "Status": status_display, - "Env Var": env_var, - "Note": note, - } - ) - - provider_table = format_table( - provider_table_data, - title=None, - columns=["Provider", "Status", "Env Var", "Note"], - ) - output.print_table(provider_table) - output.info( - "💡 Environment variables take precedence over stored tokens" - ) - output.print() - - # List OAuth tokens - check servers from provided list - if params.show_oauth and params.server_names: - # Check each configured server for OAuth tokens - for server_name in params.server_names: - # Check if tokens exist for this server - tokens = manager.load_tokens(server_name) - if tokens: - metadata = {} - if tokens.expires_in: - import time - - if tokens.issued_at: - metadata["expires_at"] = ( - tokens.issued_at + tokens.expires_in - ) - else: - 
metadata["expires_at"] = time.time() + tokens.expires_in - - oauth_entries.append( - { - "name": server_name, - "type": "oauth", - "namespace": OAUTH_NAMESPACE, - "registered_at": tokens.issued_at - if tokens.issued_at - else None, - "metadata": metadata, - } - ) - - if oauth_entries: - output.print("\n[bold]OAuth Tokens (Server Authentication):[/bold]") - oauth_table_data = [] - - for entry in oauth_entries: - token_name = entry.get("name", "unknown") - token_type = entry.get("type", "unknown") - - # Format created date - import time - from datetime import datetime - - registered_at = entry.get("registered_at") - created = "-" - if registered_at and isinstance(registered_at, (int, float)): - dt = datetime.fromtimestamp(registered_at) - created = dt.strftime("%Y-%m-%d") - - # Get expires info from metadata - metadata_raw = entry.get("metadata", {}) - metadata = metadata_raw if isinstance(metadata_raw, dict) else {} - expires = metadata.get("expires_at", "-") - if expires != "-" and isinstance(expires, (int, float)): - exp_dt = datetime.fromtimestamp(expires) - # Check if expired - if time.time() > expires: - expires = f"{exp_dt.strftime('%Y-%m-%d')} ⚠️ Expired" - else: - expires = exp_dt.strftime("%Y-%m-%d") - - oauth_table_data.append( - { - "Server": token_name, - "Type": token_type, - "Created": created, - "Expires": expires, - } - ) - - oauth_table = format_table( - oauth_table_data, - title=None, - columns=["Server", "Type", "Created", "Expires"], - ) - output.print_table(oauth_table) - output.info("💡 Use '/token get ' to view token details") - output.print() - elif params.show_oauth and not params.server_names: - # Show message when no servers configured - output.info("No servers configured. 
OAuth tokens are stored per server.") - output.print() - - # List tokens from registry (non-provider, non-oauth tokens) - registry = manager.registry - registered_tokens = registry.list_tokens(namespace=params.namespace) - - table_data = [] - for entry in registered_tokens: - token_type = entry.get("type", "unknown") - token_name = entry.get("name", "unknown") - token_namespace = entry.get("namespace", "unknown") - - # Skip provider namespace if show_providers is True (already shown above) - if params.show_providers and token_namespace == "provider": - continue - - # Skip OAuth namespace if show_oauth is True (already shown above) - if params.show_oauth and token_namespace == OAUTH_NAMESPACE: - continue - - # Filter by type - if token_type == TokenType.BEARER.value and not params.show_bearer: - continue - if token_type == TokenType.API_KEY.value and not params.show_api_keys: - continue - - # Format created date - import time - from datetime import datetime - - registered_at = entry.get("registered_at") - created = "-" - if registered_at and isinstance(registered_at, (int, float)): - dt = datetime.fromtimestamp(registered_at) - created = dt.strftime("%Y-%m-%d") - - # Get expires info from metadata - metadata_raw = entry.get("metadata", {}) - metadata = metadata_raw if isinstance(metadata_raw, dict) else {} - expires = metadata.get("expires_at", "-") - if expires != "-" and isinstance(expires, (int, float)): - exp_dt = datetime.fromtimestamp(expires) - # Check if expired - if time.time() > expires: - expires = f"{exp_dt.strftime('%Y-%m-%d')} ⚠️" - else: - expires = exp_dt.strftime("%Y-%m-%d") - - # Build details (provider, namespace if not generic) - details = [] - if metadata.get("provider"): - details.append(f"provider={metadata['provider']}") - if token_namespace != "generic": - details.append(f"ns={token_namespace}") - - table_data.append( - { - "Type": token_type, - "Name": token_name, - "Created": created, - "Expires": expires, - "Details": ", ".join(details) 
if details else "-", - } - ) - - if table_data: - output.print("\n[bold]Other Tokens:[/bold]") - table = format_table( - table_data, - title=None, - columns=["Type", "Name", "Created", "Expires", "Details"], - ) - output.print_table(table) - elif not provider_tokens and not oauth_entries: - # Only show "No tokens found" if we truly have no tokens at all - output.warning("No tokens found.") - - output.print() - output.tip("💡 Token Management:") - output.info(" • Store provider key: mcp-cli token set-provider ") - output.info(" • Store bearer token: mcp-cli token set --type bearer") - output.info(" • View: mcp-cli token get ") - output.info(" • Delete: mcp-cli token delete ") - - except Exception as e: - output.error(f"Error listing tokens: {e}") - raise - - -async def token_set_action_async(params: TokenSetParams) -> None: - """ - Store a token manually. - - Args: - params: Token set parameters - - Example: - >>> params = TokenSetParams(name="my-token", token_type="bearer", value="abc123") - >>> await token_set_action_async(params) - """ - try: - manager = _get_token_manager() - store = manager.token_store - - # Prompt for value if not provided - value = params.value - if value is None: - from getpass import getpass - - value = getpass(f"Enter token value for '{params.name}': ") - - if not value: - output.error("Token value is required") - return - - # Store based on type - registry = manager.registry - - if params.token_type == "bearer": - bearer = BearerToken(token=value) - stored = bearer.to_stored_token(params.name) - stored.metadata = {"namespace": params.namespace} - store._store_raw( - f"{params.namespace}:{params.name}", json.dumps(stored.model_dump()) - ) - - # Register in index with expiration if available - reg_metadata = {} - if bearer.expires_at: - reg_metadata["expires_at"] = bearer.expires_at - - registry.register( - params.name, TokenType.BEARER, params.namespace, metadata=reg_metadata - ) - output.success(f"Bearer token '{params.name}' stored 
successfully") - - elif params.token_type == "api-key": - if not params.provider: - output.error("Provider name is required for API keys") - output.hint("Use: --provider ") - return - - api_key = APIKeyToken(provider=params.provider, key=value) - stored = api_key.to_stored_token(params.name) - stored.metadata = {"namespace": params.namespace} - store._store_raw( - f"{params.namespace}:{params.name}", json.dumps(stored.model_dump()) - ) - - # Register in index - registry.register( - params.name, - TokenType.API_KEY, - params.namespace, - metadata={"provider": params.provider}, - ) - output.success( - f"API key '{params.name}' for '{params.provider}' stored successfully" - ) - - elif params.token_type == "generic": - store.store_generic(params.name, value, params.namespace) - - # Register in index - registry.register( - params.name, TokenType.BEARER, params.namespace, metadata={} - ) - output.success( - f"Token '{params.name}' stored in namespace '{params.namespace}'" - ) - - else: - output.error(f"Unknown token type: {params.token_type}") - output.hint("Valid types: bearer, api-key, generic") - - except Exception as e: - output.error(f"Error storing token: {e}") - raise - - -async def token_get_action_async(name: str, namespace: str = "generic") -> None: - """ - Get information about a stored token. 
- - Args: - name: Token identifier/name - namespace: Storage namespace - """ - try: - manager = _get_token_manager() - store = manager.token_store - - raw_data = store._retrieve_raw(f"{namespace}:{name}") - if not raw_data: - output.warning(f"Token '{name}' not found in namespace '{namespace}'") - return - - try: - from mcp_cli.auth import StoredToken - - stored = StoredToken.model_validate(json.loads(raw_data)) - info = stored.get_display_info() - - output.rule(f"[bold]Token: {name}[/bold]", style="primary") - output.info(f"Type: {stored.token_type.value}") - output.info(f"Namespace: {namespace}") - - for key, value in info.items(): - if key not in ["name", "type"]: - output.info(f"{key}: {value}") - - except Exception as e: - output.warning(f"Could not parse token data: {e}") - - except Exception as e: - output.error(f"Error retrieving token: {e}") - raise - - -async def token_delete_action_async(params: TokenDeleteParams) -> None: - """ - Delete a stored token. - - Args: - params: Token delete parameters - - Example: - >>> params = TokenDeleteParams(name="my-token", namespace="generic") - >>> await token_delete_action_async(params) - """ - try: - manager = _get_token_manager() - store = manager.token_store - registry = manager.registry - - if params.oauth: - # Delete OAuth token - if manager.delete_tokens(params.name): - output.success(f"OAuth token for server '{params.name}' deleted") - else: - output.warning(f"OAuth token for server '{params.name}' not found") - return - - # Delete generic token - if params.namespace: - namespaces = [params.namespace] - else: - namespaces = ["bearer", "api-key", "provider", "generic"] - - deleted = False - for ns in namespaces: - if store.delete_generic(params.name, ns): - # Unregister from index - registry.unregister(params.name, ns) - output.success(f"Token '{params.name}' deleted from namespace '{ns}'") - deleted = True - break - - if not deleted: - output.warning(f"Token '{params.name}' not found") - - except Exception as 
e: - output.error(f"Error deleting token: {e}") - raise - - -async def token_set_provider_action_async(params: TokenProviderParams) -> None: - """ - Store a provider API key in secure storage. - - Args: - params: Token provider parameters - - Example: - >>> params = TokenProviderParams(provider="openai", api_key="sk-xxx") - >>> await token_set_provider_action_async(params) - """ - try: - from mcp_cli.auth.provider_tokens import ( - set_provider_token, - get_provider_env_var_name, - ) - import os - - manager = _get_token_manager() - - # Prompt for api_key if not provided - api_key = params.api_key - if api_key is None: - from getpass import getpass - - api_key = getpass(f"Enter API key for '{params.provider}': ") - - if not api_key: - output.error("API key is required") - return - - # Store the token - if set_provider_token(params.provider, api_key, manager): - output.success(f"✅ Stored API key for provider '{params.provider}'") - - # Show hierarchy info - env_var = get_provider_env_var_name(params.provider) - output.print() - output.info("📋 Token Hierarchy:") - output.info(f" 1. Environment variable: {env_var} (highest priority)") - output.info(" 2. Secure storage: 🔐 (currently set)") - - # Check if env var is also set - if os.environ.get(env_var): - output.warning( - f"\n⚠️ Note: {env_var} is set in environment and will take precedence" - ) - else: - output.error(f"Failed to store API key for provider '{params.provider}'") - - except Exception as e: - output.error(f"Error storing provider token: {e}") - raise - - -async def token_get_provider_action_async(params: TokenProviderParams) -> None: - """ - Get information about a provider's API key. 
- - Args: - params: Token provider parameters - - Example: - >>> params = TokenProviderParams(provider="openai") - >>> await token_get_provider_action_async(params) - """ - try: - from mcp_cli.auth.provider_tokens import ( - check_provider_token_status, - ) - - manager = _get_token_manager() - status = check_provider_token_status(params.provider, manager) - - output.rule(f"[bold]Provider Token: {params.provider}[/bold]", style="primary") - - # Display status - if status["has_token"]: - output.success("✅ API key is configured") - output.info(f" Source: {status['source']}") - else: - output.warning("❌ No API key configured") - - output.print() - output.info("Token Status:") - output.info( - f" • Environment variable ({status['env_var']}): {'✅ set' if status['in_env'] else '❌ not set'}" - ) - output.info( - f" • Secure storage: {'✅ set' if status['in_storage'] else '❌ not set'}" - ) - - output.print() - output.tip("Hierarchy: Environment variables take precedence over storage") - - if not status["has_token"]: - output.print() - output.info("To set API key:") - output.info( - f" • Via storage: mcp-cli token set-provider {params.provider}" - ) - output.info(f" • Via environment: export {status['env_var']}=your-key") - - except Exception as e: - output.error(f"Error retrieving provider token info: {e}") - raise - - -async def token_delete_provider_action_async(params: TokenProviderParams) -> None: - """ - Delete a provider API key from secure storage. - - Note: This only removes from storage, not environment variables. 
- - Args: - params: Token provider parameters - """ - try: - from mcp_cli.auth.provider_tokens import ( - check_provider_token_status, - ) - - provider = params.provider - manager = _get_token_manager() - status = check_provider_token_status(provider, manager) - - output.rule(f"[bold]Provider Token: {provider}[/bold]", style="primary") - - # Display status - if status["has_token"]: - output.success("✅ API key is configured") - output.info(f" Source: {status['source']}") - else: - output.warning("❌ No API key configured") - - output.print() - output.info("Token Status:") - output.info( - f" • Environment variable ({status['env_var']}): {'✅ set' if status['in_env'] else '❌ not set'}" - ) - output.info( - f" • Secure storage: {'✅ set' if status['in_storage'] else '❌ not set'}" - ) - - output.print() - output.tip("Hierarchy: Environment variables take precedence over storage") - - if not status["has_token"]: - output.print() - output.info("To set API key:") - output.info( - f" • Via storage: mcp-cli token set-provider {params.provider}" - ) - output.info(f" • Via environment: export {status['env_var']}=your-key") - - except Exception as e: - output.error(f"Error retrieving provider token info: {e}") - raise - - -async def token_clear_action_async(params: TokenClearParams) -> None: - """ - Clear all stored tokens. - - Args: - params: Token clear parameters - - Example: - >>> params = TokenClearParams(namespace="provider", force=True) - >>> await token_clear_action_async(params) - """ - try: - manager = _get_token_manager() - store = manager.token_store - registry = manager.registry - - # Confirm before clearing - if not params.force: - if params.namespace: - msg = f"Clear all tokens in namespace '{params.namespace}'?" - else: - msg = "Clear ALL tokens from ALL namespaces?" 
- - from chuk_term.ui.prompts import confirm - - if not confirm(msg): - output.warning("Cancelled") - return - - # Get tokens to clear from registry - tokens_to_clear = registry.list_tokens(namespace=params.namespace) - - if not tokens_to_clear: - output.warning("No tokens to clear") - return - - # Clear each token from storage - count = 0 - for entry in tokens_to_clear: - token_name = entry.get("name") - token_namespace = entry.get("namespace") - if ( - token_name - and token_namespace - and store.delete_generic(token_name, token_namespace) - ): - count += 1 - - # Clear from registry - if params.namespace: - registry.clear_namespace(params.namespace) - else: - registry.clear_all() - - if count > 0: - output.success(f"Cleared {count} token(s)") - else: - output.warning("No tokens to clear") - - except Exception as e: - output.error(f"Error clearing tokens: {e}") - raise - - -async def token_backends_action_async() -> None: - """List available token storage backends.""" - import os - - try: - available = TokenStoreFactory.get_available_backends() - - # Check for CLI override first - backend_override = os.environ.get("MCP_CLI_TOKEN_BACKEND") - override_succeeded = False - if backend_override: - try: - detected = TokenStoreBackend(backend_override) - override_succeeded = True - except (ValueError, KeyError): - # Invalid backend specified, use auto-detection - detected = TokenStoreFactory._detect_backend() - output.warning( - f"Invalid backend '{backend_override}', using auto-detected backend" - ) - else: - detected = TokenStoreFactory._detect_backend() - - output.rule("[bold]🔒 Token Storage Backends[/bold]", style="primary") - - all_backends = [ - ("keychain", "macOS Keychain"), - ("windows", "Windows Credential Manager"), - ("secretservice", "Linux Secret Service"), - ("vault", "HashiCorp Vault"), - ("encrypted", "Encrypted File Storage"), - ] - - table_data = [] - for backend_id, backend_name in all_backends: - backend = TokenStoreBackend(backend_id) - is_available 
= backend in available - is_detected = backend == detected - - status = [] - if is_detected: - status.append("🎯 Auto-detected") - if is_available: - status.append("✓") - - table_data.append( - { - "Backend": backend_name, - "Available": "✓" if is_available else "✗", - "Status": " ".join(status) if status else "-", - } - ) - - table = format_table( - table_data, title=None, columns=["Backend", "Available", "Status"] - ) - output.print_table(table) - output.print() - if override_succeeded: - output.info( - f"Current backend: {detected.value} (overridden via --token-backend)" - ) - else: - output.info(f"Current backend: {detected.value}") - - except Exception as e: - output.error(f"Error listing backends: {e}") - raise diff --git a/src/mcp_cli/commands/actions/tools.py b/src/mcp_cli/commands/actions/tools.py deleted file mode 100644 index 36d361b7..00000000 --- a/src/mcp_cli/commands/actions/tools.py +++ /dev/null @@ -1,290 +0,0 @@ -# src/mcp_cli/commands/actions/tools.py -""" -Show **all tools** exposed by every connected MCP server, either as a -pretty Rich table or raw JSON. - -ENHANCED: Now includes validation status and filtering information. - -How to use ----------- -* Chat / interactive : `/tools`, `/tools --all`, `/tools --raw`, `/tools --validation` -* CLI script : `mcp-cli tools [--all|--raw|--validation]` - -Both the chat & CLI layers call :pyfunc:`tools_action_async`; the -blocking helper :pyfunc:`tools_action` exists only for legacy sync code. 
-""" - -from __future__ import annotations - -import json -import logging -from typing import Any - -# MCP-CLI helpers -from mcp_cli.ui.formatting import create_tools_table -from mcp_cli.tools.manager import ToolManager -from mcp_cli.utils.async_utils import run_blocking -from chuk_term.ui import output, format_table -from mcp_cli.context import get_context - -logger = logging.getLogger(__name__) - - -# ──────────────────────────────────────────────────────────────────────────────── -# async (canonical) implementation -# ──────────────────────────────────────────────────────────────────────────────── -async def tools_action_async( # noqa: D401 - *, - show_details: bool = False, - show_raw: bool = False, - show_validation: bool = False, - provider: str = "openai", -) -> list[dict[str, Any]]: - """ - Fetch the **deduplicated** tool list from *all* servers and print it. - - Parameters - ---------- - show_details - When *True*, include parameter schemas in the table. - show_raw - When *True*, dump raw JSON definitions instead of a table. - show_validation - When *True*, show validation status and errors. - provider - Provider to validate tools for (default: openai). - - Returns - ------- - list - The list of tool-metadata dictionaries (always JSON-serialisable). 
- """ - # Get context and tool manager - context = get_context() - tm = context.tool_manager - - if not tm: - output.error("No tool manager available") - return [] - - output.info("\nFetching tool catalogue from all servers…") - - if show_validation: - # Show validation-specific information - return await _show_validation_info(tm, provider) - - # Get tools based on whether validation is available - if hasattr(tm, "get_adapted_tools_for_llm"): - # Use validated tools - try: - valid_tools_defs, _ = await tm.get_adapted_tools_for_llm(provider) - - # Convert back to ToolInfo-like structure for display - all_tools = [] - for tool_def in valid_tools_defs: - func = tool_def.get("function", {}) - tool_name = func.get("name", "unknown") - - # Try to extract namespace from name - if "_" in tool_name: - parts = tool_name.split("_", 1) - namespace = parts[0] - name = parts[1] - else: - namespace = "unknown" - name = tool_name - - # Create a ToolInfo-like object - tool_info = type( - "ToolInfo", - (), - { - "name": name, - "namespace": namespace, - "description": func.get("description", ""), - "parameters": func.get("parameters", {}), - "is_async": False, - "tags": [], - "supports_streaming": False, - }, - )() - - all_tools.append(tool_info) - - # Show validation summary if available - if hasattr(tm, "get_validation_summary"): - summary = tm.get_validation_summary() - if summary.get("invalid_tools", 0) > 0: - output.print( - f"Note: {summary['invalid_tools']} tools filtered out due to validation errors" - ) - output.hint("Use --validation flag to see details") - - except Exception as e: - logger.warning( - f"Error getting validated tools, falling back to all tools: {e}" - ) - all_tools = await tm.get_unique_tools() - else: - # Fallback to original method - all_tools = await tm.get_unique_tools() - - if not all_tools: - output.warning("No tools available from any server.") - logger.debug("ToolManager returned an empty tools list") - return [] - - # ── raw JSON mode 
─────────────────────────────────────────────────── - if show_raw: - payload = [ - { - "name": t.name, - "namespace": t.namespace, - "description": t.description, - "parameters": t.parameters, - "is_async": getattr(t, "is_async", False), - "tags": getattr(t, "tags", []), - "aliases": getattr(t, "aliases", []), - } - for t in all_tools - ] - # Use chuk_term's json output - json_str = json.dumps(payload, indent=2, ensure_ascii=False) - output.json(json_str) - - return payload - - # ── Rich table mode ───────────────────────────────────────────────── - table = create_tools_table(all_tools, show_details=show_details) - output.print_table(table) - output.success(f"Total tools available: {len(all_tools)}") - - # Show validation info if enhanced manager - if hasattr(tm, "get_validation_summary"): - summary = tm.get_validation_summary() - if summary.get("total_tools", 0) > len(all_tools): - output.print( - f"({summary['total_tools'] - len(all_tools)} tools hidden due to validation/filtering)" - ) - - # Return a safe JSON structure (no .to_dict() needed) - return [ - { - "name": t.name, - "namespace": t.namespace, - "description": t.description, - "parameters": t.parameters, - "is_async": getattr(t, "is_async", False), - "tags": getattr(t, "tags", []), - "aliases": getattr(t, "aliases", []), - } - for t in all_tools - ] - - -async def _show_validation_info(tm: ToolManager, provider: str) -> list[dict[str, Any]]: - """Show detailed validation information.""" - output.info(f"Tool Validation Report for {provider}") - - if not hasattr(tm, "get_validation_summary"): - output.print("Validation not available - using basic ToolManager") - return [] - - # Get validation summary - summary = tm.get_validation_summary() - - # Create validation summary table using chuk-term - summary_rows = [ - ["Total Tools", str(summary.get("total_tools", 0))], - ["Valid Tools", str(summary.get("valid_tools", 0))], - ["Invalid Tools", str(summary.get("invalid_tools", 0))], - ["User Disabled", 
str(summary.get("disabled_by_user", 0))], - ["Validation Disabled", str(summary.get("disabled_by_validation", 0))], - ] - - summary_table = format_table( - summary_rows, title="Validation Summary", columns=["Metric", "Count"] - ) - - output.print_table(summary_table) - - # Show validation errors - errors = summary.get("validation_errors", []) - if errors: - output.error(f"Validation Errors ({len(errors)}):") - - error_rows = [] - for error in errors[:10]: # Show first 10 errors - error_msg = error.get("error", "No error message") - if len(error_msg) > 80: - error_msg = error_msg[:80] + "..." - error_rows.append( - [ - error.get("tool", "unknown"), - error_msg, - error.get("reason", "unknown"), - ] - ) - - errors_table = format_table(error_rows, columns=["Tool", "Error", "Reason"]) - - output.print_table(errors_table) - - if len(errors) > 10: - output.info(f"... and {len(errors) - 10} more errors") - - # Show disabled tools - disabled = summary.get("disabled_tools", {}) - if disabled: - output.warning(f"Disabled Tools ({len(disabled)}):") - - disabled_rows = [[tool, reason] for tool, reason in disabled.items()] - - disabled_table = format_table(disabled_rows, columns=["Tool", "Reason"]) - - output.print_table(disabled_table) - - # Show auto-fix status - if hasattr(tm, "is_auto_fix_enabled"): - auto_fix_status = "Enabled" if tm.is_auto_fix_enabled() else "Disabled" - output.info(f"\nAuto-fix: {auto_fix_status}") - - # Show helpful commands - output.print("\nCommands:") - output.print(" • /tools-disable - Disable a tool") - output.print(" • /tools-enable - Enable a tool") - output.print(" • /tools-validate - Re-run validation") - output.print(" • /tools-autofix on - Enable auto-fixing") - - return [{"validation_summary": summary}] - - -# ──────────────────────────────────────────────────────────────────────────────── -# sync wrapper - for legacy CLI paths -# ──────────────────────────────────────────────────────────────────────────────── -def tools_action( - *, - 
show_details: bool = False, - show_raw: bool = False, - show_validation: bool = False, - provider: str = "openai", -) -> list[dict[str, Any]]: - """ - Blocking wrapper around :pyfunc:`tools_action_async`. - - Raises - ------ - RuntimeError - If called from inside a running event-loop. - """ - return run_blocking( - tools_action_async( - show_details=show_details, - show_raw=show_raw, - show_validation=show_validation, - provider=provider, - ) - ) - - -__all__ = ["tools_action_async", "tools_action"] diff --git a/src/mcp_cli/commands/actions/tools_call.py b/src/mcp_cli/commands/actions/tools_call.py deleted file mode 100644 index 3ecc778e..00000000 --- a/src/mcp_cli/commands/actions/tools_call.py +++ /dev/null @@ -1,105 +0,0 @@ -# src/mcp_cli/commands/actions/tools_call.py -""" -Open an *interactive “call a tool” wizard* that lets you pick a tool and -pass JSON arguments right from the terminal. - -Highlights ----------- -* Leaves **zero state** behind - safe to hot-reload while a chat/TUI is - running. -* Re-uses :pyfunc:`mcp_cli.tools.formatting.display_tool_call_result` - for pretty result rendering, so the output looks the same everywhere. -""" - -from __future__ import annotations -import asyncio -import json -import logging -from typing import Any - -# mcp cli -from chuk_term.ui import output -from mcp_cli.tools.models import ToolCallResult -from mcp_cli.ui.formatting import display_tool_call_result -from mcp_cli.context import get_context - -# logger -logger = logging.getLogger(__name__) - - -# ════════════════════════════════════════════════════════════════════════ -# Main entry-point (async coroutine) -# ════════════════════════════════════════════════════════════════════════ -async def tools_call_action() -> None: # noqa: D401 - """ - Launch the mini-wizard, execute the chosen tool, show the result. - - This function is designed for *interactive* use only - it blocks on - `input()` twice (tool selection & JSON args). 
- """ - # Get context and tool manager - context = get_context() - tm = context.tool_manager - - if not tm: - output.print("[red]Error:[/red] No tool manager available") - return - - cprint = output.print - - cprint("[cyan]\nTool Call Interface[/cyan]") - - # Fetch distinct tools (no duplicates across servers) - all_tools = await tm.get_unique_tools() - if not all_tools: - cprint("[yellow]No tools available from any server.[/yellow]") - return - - # ── list tools ──────────────────────────────────────────────────── - cprint("[green]Available tools:[/green]") - for idx, tool in enumerate(all_tools, 1): - desc = tool.description or "No description" - cprint(f" {idx}. {tool.name} (from {tool.namespace}) - {desc}") - - # ── user selection ──────────────────────────────────────────────── - sel_raw = await asyncio.to_thread(input, "\nEnter tool number to call: ") - try: - sel = int(sel_raw) - 1 - tool = all_tools[sel] - except (ValueError, IndexError): - cprint("[red]Invalid selection.[/red]") - return - - cprint(f"\n[green]Selected:[/green] {tool.name} from {tool.namespace}") - if tool.description: - cprint(f"[cyan]Description:[/cyan] {tool.description}") - - # ── argument collection ─────────────────────────────────────────── - params_schema: dict[str, Any] = tool.parameters or {} - args: dict[str, Any] = {} - - if params_schema.get("properties"): - cprint("\n[yellow]Enter arguments as JSON (leave blank for none):[/yellow]") - args_raw = await asyncio.to_thread(input, "> ") - if args_raw.strip(): - try: - args = json.loads(args_raw) - except json.JSONDecodeError: - cprint("[red]Invalid JSON - aborting.[/red]") - return - else: - cprint("[dim]Tool takes no arguments.[/dim]") - - # ── execution ───────────────────────────────────────────────────── - fq_name = f"{tool.namespace}.{tool.name}" - cprint(f"\n[cyan]Calling '{fq_name}'…[/cyan]") - - try: - result: ToolCallResult = await tm.execute_tool(fq_name, args) - display_tool_call_result(result) - except Exception as exc: 
# noqa: BLE001 - logger.exception("Error executing tool") - cprint(f"[red]Error: {exc}[/red]") - - -__all__ = ["tools_call_action"] diff --git a/src/mcp_cli/commands/actions/tools_confirm.py b/src/mcp_cli/commands/actions/tools_confirm.py deleted file mode 100644 index b4786ff6..00000000 --- a/src/mcp_cli/commands/actions/tools_confirm.py +++ /dev/null @@ -1,123 +0,0 @@ -# src/mcp_cli/commands/actions/tools_confirm.py -""" -Show **all tools** exposed by every connected MCP server, either as a -pretty Rich table or raw JSON. - -How to use ----------- -* Chat / interactive : `/tools`, `/tools --all`, `/tools --raw` -* CLI script : `mcp-cli tools [--all|--raw]` - -Both the chat & CLI layers call :pyfunc:`tools_action_async`; the -blocking helper :pyfunc:`tools_action` exists only for legacy sync code. -""" - -from __future__ import annotations - -import json -import logging -from typing import Any - -# MCP-CLI helpers -from mcp_cli.ui.formatting import create_tools_table -from mcp_cli.tools.manager import ToolManager -from mcp_cli.utils.async_utils import run_blocking -from chuk_term.ui import output - -logger = logging.getLogger(__name__) - - -# ──────────────────────────────────────────────────────────────────────────────── -# async (canonical) implementation -# ──────────────────────────────────────────────────────────────────────────────── -async def tools_action_async( # noqa: D401 - tm: ToolManager, - *, - show_details: bool = False, - show_raw: bool = False, -) -> list[dict[str, Any]]: - """ - Fetch the **deduplicated** tool list from *all* servers and print it. - - Parameters - ---------- - tm - A fully-initialised :class:`~mcp_cli.tools.manager.ToolManager`. - show_details - When *True*, include parameter schemas in the table. - show_raw - When *True*, dump raw JSON definitions instead of a table. - - Returns - ------- - list - The list of tool-metadata dictionaries (always JSON-serialisable). 
- """ - output.info("\nFetching tool catalogue from all servers…") - - all_tools = await tm.get_unique_tools() - if not all_tools: - output.warning("No tools available from any server.") - logger.debug("ToolManager returned an empty tools list") - return [] - - # ── raw JSON mode ─────────────────────────────────────────────────── - if show_raw: - payload = [ - { - "name": t.name, - "namespace": t.namespace, - "description": t.description, - "parameters": t.parameters, - "is_async": getattr(t, "is_async", False), - "tags": getattr(t, "tags", []), - "aliases": getattr(t, "aliases", []), - } - for t in all_tools - ] - output.json(json.dumps(payload, indent=2, ensure_ascii=False)) - return payload - - # ── Rich table mode ───────────────────────────────────────────────── - table = create_tools_table(all_tools, show_details=show_details) - output.print_table(table) - output.success(f"Total tools available: {len(all_tools)}") - - # Return a safe JSON structure (no .to_dict() needed) - return [ - { - "name": t.name, - "namespace": t.namespace, - "description": t.description, - "parameters": t.parameters, - "is_async": getattr(t, "is_async", False), - "tags": getattr(t, "tags", []), - "aliases": getattr(t, "aliases", []), - } - for t in all_tools - ] - - -# ──────────────────────────────────────────────────────────────────────────────── -# sync wrapper - for legacy CLI paths -# ──────────────────────────────────────────────────────────────────────────────── -def tools_action( - tm: ToolManager, - *, - show_details: bool = False, - show_raw: bool = False, -) -> list[dict[str, Any]]: - """ - Blocking wrapper around :pyfunc:`tools_action_async`. - - Raises - ------ - RuntimeError - If called from inside a running event-loop. 
- """ - return run_blocking( - tools_action_async(tm, show_details=show_details, show_raw=show_raw) - ) - - -__all__ = ["tools_action_async", "tools_action"] diff --git a/src/mcp_cli/commands/actions/tools_manage.py b/src/mcp_cli/commands/actions/tools_manage.py deleted file mode 100644 index f4bf63ec..00000000 --- a/src/mcp_cli/commands/actions/tools_manage.py +++ /dev/null @@ -1,209 +0,0 @@ -# src/mcp_cli/commands/actions/tools_manage.py -""" -from __future__ import annotations - -Tool management commands for enabling/disabling tools and validation. -""" - -import asyncio -import logging -from typing import Any - -from chuk_term.ui import output, format_table - -from mcp_cli.tools.manager import ToolManager -from mcp_cli.commands.enums import ToolCommand - -logger = logging.getLogger(__name__) - - -async def tools_manage_action_async( - tm: ToolManager, action: str, tool_name: str | None = None, **kwargs -) -> dict[str, Any]: - """ - Manage tools (enable/disable/validate). - - Args: - tm: Tool manager - action: Action to perform (enable, disable, validate, status, list-disabled) - tool_name: Tool name for specific actions - - Returns: - Action result dictionary - """ - - if action == ToolCommand.ENABLE.value: - if not tool_name: - output.error("Tool name required for enable action") - return {"success": False, "error": "Tool name required"} - - tm.enable_tool(tool_name) - output.success(f"✓ Enabled tool: {tool_name}") - return {"success": True, "action": "enable", "tool": tool_name} - - elif action == ToolCommand.DISABLE.value: - if not tool_name: - output.error("Tool name required for disable action") - return {"success": False, "error": "Tool name required"} - - tm.disable_tool(tool_name, reason="user") - output.warning(f"✗ Disabled tool: {tool_name}") - return {"success": True, "action": "disable", "tool": tool_name} - - elif action == ToolCommand.VALIDATE.value: - if tool_name: - # Validate single tool - is_valid, error_msg = await 
tm.validate_single_tool(tool_name) - if is_valid: - output.success(f"✓ Tool '{tool_name}' is valid") - else: - output.error(f"✗ Tool '{tool_name}' is invalid: {error_msg}") - - return { - "success": True, - "action": "validate", - "tool": tool_name, - "is_valid": is_valid, - "error": error_msg, - } - else: - # Validate all tools - provider = kwargs.get("provider", "openai") - output.info(f"Validating all tools for {provider}...") - - summary = await tm.revalidate_tools(provider) - - output.success("Validation complete:") - output.print(f" • Total tools: {summary.get('total_tools', 0)}") - output.print(f" • Valid: {summary.get('valid_tools', 0)}") - output.print(f" • Invalid: {summary.get('invalid_tools', 0)}") - - return {"success": True, "action": "validate_all", "summary": summary} - - elif action == ToolCommand.STATUS.value: - summary = tm.get_validation_summary() - - # Create status table data - table_data = [ - { - "Metric": "Total Tools", - "Value": str(summary.get("total_tools", "Unknown")), - }, - { - "Metric": "Valid Tools", - "Value": str(summary.get("valid_tools", "Unknown")), - }, - { - "Metric": "Invalid Tools", - "Value": str(summary.get("invalid_tools", "Unknown")), - }, - { - "Metric": "Disabled by User", - "Value": str(summary.get("disabled_by_user", 0)), - }, - { - "Metric": "Disabled by Validation", - "Value": str(summary.get("disabled_by_validation", 0)), - }, - { - "Metric": "Auto-fix Enabled", - "Value": "Yes" if summary.get("auto_fix_enabled", False) else "No", - }, - {"Metric": "Last Provider", "Value": str(summary.get("provider", "None"))}, - ] - - table = format_table( - table_data, title="Tool Management Status", columns=["Metric", "Value"] - ) - output.print_table(table) - return {"success": True, "action": "status", "summary": summary} - - elif action == ToolCommand.LIST_DISABLED.value: - disabled_tools = tm.get_disabled_tools() - - if not disabled_tools: - output.success("No disabled tools") - else: - # Build table data for disabled 
tools - table_data = [] - for tool, reason in disabled_tools.items(): - table_data.append({"Tool Name": tool, "Reason": reason}) - - table = format_table( - table_data, title="Disabled Tools", columns=["Tool Name", "Reason"] - ) - output.print_table(table) - - return { - "success": True, - "action": "list_disabled", - "disabled_tools": disabled_tools, - } - - elif action == ToolCommand.DETAILS.value: - if not tool_name: - output.error("Tool name required for details action") - return {"success": False, "error": "Tool name required"} - - details = tm.get_tool_validation_details(tool_name) - if not details: - output.error(f"Tool '{tool_name}' not found") - return {"success": False, "error": "Tool not found"} - - # Display details panel - status = ( - "Enabled" - if details["is_enabled"] - else f"Disabled ({details['disabled_reason']})" - ) - content = f"Status: {status}\n" - - if details["validation_error"]: - content += f"Validation Error: {details['validation_error']}\n" - - if details["can_auto_fix"]: - content += "Auto-fix: Available\n" - - output.panel(content, title=f"Tool Details: {tool_name}") - return { - "success": True, - "action": "details", - "tool": tool_name, - "details": details, - } - - elif action == ToolCommand.AUTO_FIX.value: - setting = kwargs.get("enabled", True) - tm.set_auto_fix_enabled(setting) - status = "enabled" if setting else "disabled" - output.info(f"Auto-fix {status}") - return {"success": True, "action": "auto_fix", "enabled": setting} - - elif action == ToolCommand.CLEAR_VALIDATION.value: - tm.clear_validation_disabled_tools() - output.success("Cleared all validation-disabled tools") - return {"success": True, "action": "clear_validation"} - - elif action == ToolCommand.VALIDATION_ERRORS.value: - summary = tm.get_validation_summary() - errors = summary.get("validation_errors", []) - - if not errors: - output.success("No validation errors") - else: - output.error(f"Found {len(errors)} validation errors:") - for error in errors: - 
output.print(f" • {error['tool']}: {error['error']}") - - return {"success": True, "action": "validation_errors", "errors": errors} - - else: - output.error(f"Unknown action: {action}") - return {"success": False, "error": f"Unknown action: {action}"} - - -def tools_manage_action( - tm: ToolManager, action: str, tool_name: str | None = None, **kwargs -) -> dict[str, Any]: - """Sync wrapper for tool management actions.""" - return asyncio.run(tools_manage_action_async(tm, action, tool_name, **kwargs)) diff --git a/src/mcp_cli/commands/base.py b/src/mcp_cli/commands/base.py index 4d265cbb..93b1fa2d 100644 --- a/src/mcp_cli/commands/base.py +++ b/src/mcp_cli/commands/base.py @@ -72,10 +72,6 @@ class UnifiedCommand(ABC): Commands implement this interface once and work in all modes. """ - def __init__(self): - """Initialize the command.""" - pass - @property @abstractmethod def name(self) -> str: diff --git a/src/mcp_cli/commands/conversation/__init__.py b/src/mcp_cli/commands/conversation/__init__.py new file mode 100644 index 00000000..5bec084b --- /dev/null +++ b/src/mcp_cli/commands/conversation/__init__.py @@ -0,0 +1,7 @@ +"""Conversation management commands.""" + +from mcp_cli.commands.conversation.conversation import ConversationCommand + +__all__ = [ + "ConversationCommand", +] diff --git a/src/mcp_cli/commands/definitions/conversation.py b/src/mcp_cli/commands/conversation/conversation.py similarity index 95% rename from src/mcp_cli/commands/definitions/conversation.py rename to src/mcp_cli/commands/conversation/conversation.py index 9b8e5a6d..e6b72bd8 100644 --- a/src/mcp_cli/commands/definitions/conversation.py +++ b/src/mcp_cli/commands/conversation/conversation.py @@ -162,7 +162,9 @@ async def execute(self, **kwargs) -> CommandResult: ) # Handle actions - if action == "show": + from mcp_cli.config import ConversationAction + + if action == ConversationAction.SHOW.value: # Show conversation history in table format if hasattr(chat_context, 
"conversation_history"): history = chat_context.conversation_history @@ -191,13 +193,14 @@ async def execute(self, **kwargs) -> CommandResult: content = "" # Format role with emoji - if role.lower() == "system": + role_lower = role.lower() + if role_lower == MessageRole.SYSTEM.value: role_display = "🔧 System" - elif role.lower() == "user": + elif role_lower == MessageRole.USER.value: role_display = "👤 User" - elif role.lower() == "assistant": + elif role_lower == MessageRole.ASSISTANT.value: role_display = "🤖 Assistant" - elif role.lower() == "tool": + elif role_lower == MessageRole.TOOL.value: role_display = "🔨 Tool" else: role_display = f"❓ {role.title()}" @@ -263,7 +266,7 @@ async def execute(self, **kwargs) -> CommandResult: error="Conversation history not available.", ) - elif action == "clear": + elif action == ConversationAction.CLEAR.value: # Clear conversation history if hasattr(chat_context, "clear_conversation"): chat_context.clear_conversation() @@ -277,7 +280,7 @@ async def execute(self, **kwargs) -> CommandResult: error="Cannot clear conversation history.", ) - elif action == "save": + elif action == ConversationAction.SAVE.value: # Save conversation filename = kwargs.get("filename") if not filename and "args" in kwargs: @@ -310,7 +313,7 @@ async def execute(self, **kwargs) -> CommandResult: error=f"Failed to save conversation: {str(e)}", ) - elif action == "load": + elif action == ConversationAction.LOAD.value: # Load conversation filename = kwargs.get("filename") if not filename and "args" in kwargs: diff --git a/src/mcp_cli/commands/core/__init__.py b/src/mcp_cli/commands/core/__init__.py new file mode 100644 index 00000000..1a9fa3ed --- /dev/null +++ b/src/mcp_cli/commands/core/__init__.py @@ -0,0 +1,15 @@ +"""Core system commands.""" + +from mcp_cli.commands.core.help import HelpCommand +from mcp_cli.commands.core.exit import ExitCommand +from mcp_cli.commands.core.clear import ClearCommand +from mcp_cli.commands.core.verbose import 
VerboseCommand +from mcp_cli.commands.core.interrupt import InterruptCommand + +__all__ = [ + "HelpCommand", + "ExitCommand", + "ClearCommand", + "VerboseCommand", + "InterruptCommand", +] diff --git a/src/mcp_cli/commands/definitions/clear.py b/src/mcp_cli/commands/core/clear.py similarity index 100% rename from src/mcp_cli/commands/definitions/clear.py rename to src/mcp_cli/commands/core/clear.py diff --git a/src/mcp_cli/commands/core/confirm.py b/src/mcp_cli/commands/core/confirm.py new file mode 100644 index 00000000..4d985479 --- /dev/null +++ b/src/mcp_cli/commands/core/confirm.py @@ -0,0 +1,113 @@ +# src/mcp_cli/commands/core/confirm.py +""" +Unified confirm command implementation (chat/interactive mode). + +Toggles tool call confirmation mode between always, never, and smart. +""" + +from __future__ import annotations + +from mcp_cli.commands.base import ( + UnifiedCommand, + CommandMode, + CommandParameter, + CommandResult, +) +from mcp_cli.utils.preferences import get_preference_manager, ConfirmationMode + + +class ConfirmCommand(UnifiedCommand): + """Toggle or set tool call confirmation mode.""" + + @property + def name(self) -> str: + return "confirm" + + @property + def aliases(self) -> list[str]: + return [] + + @property + def description(self) -> str: + return "Toggle tool call confirmation" + + @property + def help_text(self) -> str: + return """ +Toggle or set tool call confirmation mode. 
+ +Usage: + /confirm - Toggle confirmation on/off + /confirm always - Always confirm before tool execution + /confirm never - Never confirm (auto-approve all tools) + /confirm smart - Smart mode: confirm based on risk level + +Modes: + always - Ask for confirmation before every tool call + never - Execute all tools without confirmation + smart - Only confirm high-risk tool calls (default) +""" + + @property + def modes(self) -> CommandMode: + return CommandMode.CHAT | CommandMode.INTERACTIVE + + @property + def parameters(self) -> list[CommandParameter]: + return [ + CommandParameter( + name="mode", + type=str, + required=False, + help="Confirmation mode: always, never, or smart", + choices=["always", "never", "smart", "on", "off"], + ), + ] + + async def execute(self, **kwargs) -> CommandResult: + """Execute the confirm command.""" + pref_manager = get_preference_manager() + + # Get current mode + current_mode = pref_manager.get_tool_confirmation_mode() + + # Get desired mode from args + mode_arg = kwargs.get("mode") + if not mode_arg and "args" in kwargs: + args_val = kwargs["args"] + if isinstance(args_val, list) and args_val: + mode_arg = args_val[0] + elif isinstance(args_val, str): + mode_arg = args_val + + if mode_arg: + # Map on/off to always/never + mode_lower = mode_arg.lower() + if mode_lower in ("on", "true", "1", "yes"): + mode_lower = "always" + elif mode_lower in ("off", "false", "0", "no"): + mode_lower = "never" + + if mode_lower not in ("always", "never", "smart"): + return CommandResult( + success=False, + error=f"Invalid mode: {mode_arg}. 
Use 'always', 'never', or 'smart'.", + ) + + pref_manager.set_tool_confirmation_mode(mode_lower) + new_mode = ConfirmationMode(mode_lower) + else: + # Toggle: always -> never -> smart -> always + if current_mode == ConfirmationMode.ALWAYS: + new_mode = ConfirmationMode.NEVER + elif current_mode == ConfirmationMode.NEVER: + new_mode = ConfirmationMode.SMART + else: + new_mode = ConfirmationMode.ALWAYS + + pref_manager.set_tool_confirmation_mode(new_mode.value) + + return CommandResult( + success=True, + output=f"Tool confirmation mode: {new_mode.value}", + ) diff --git a/src/mcp_cli/commands/definitions/exit.py b/src/mcp_cli/commands/core/exit.py similarity index 86% rename from src/mcp_cli/commands/definitions/exit.py rename to src/mcp_cli/commands/core/exit.py index c3555695..e7e35b04 100644 --- a/src/mcp_cli/commands/definitions/exit.py +++ b/src/mcp_cli/commands/core/exit.py @@ -54,12 +54,7 @@ def requires_context(self) -> bool: async def execute(self, **kwargs) -> CommandResult: """Execute the exit command.""" - # Import the exit action - from mcp_cli.commands.actions.exit import exit_action - - # Execute the action (interactive=True for chat/interactive modes) - exit_action(interactive=True) - + # Exit is simple - just set should_exit flag return CommandResult( success=True, output="Goodbye!", diff --git a/src/mcp_cli/commands/definitions/help.py b/src/mcp_cli/commands/core/help.py similarity index 100% rename from src/mcp_cli/commands/definitions/help.py rename to src/mcp_cli/commands/core/help.py diff --git a/src/mcp_cli/commands/definitions/interrupt.py b/src/mcp_cli/commands/core/interrupt.py similarity index 100% rename from src/mcp_cli/commands/definitions/interrupt.py rename to src/mcp_cli/commands/core/interrupt.py diff --git a/src/mcp_cli/commands/definitions/verbose.py b/src/mcp_cli/commands/core/verbose.py similarity index 100% rename from src/mcp_cli/commands/definitions/verbose.py rename to src/mcp_cli/commands/core/verbose.py diff --git 
a/src/mcp_cli/commands/definitions/__init__.py b/src/mcp_cli/commands/definitions/__init__.py deleted file mode 100644 index 8a5bbd56..00000000 --- a/src/mcp_cli/commands/definitions/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -# src/mcp_cli/commands/definitions/__init__.py -""" -Command definitions for MCP CLI. - -This module contains all command class definitions that specify -the interface and parameters for each command. -""" - -from .clear import ClearCommand -from .conversation import ConversationCommand -from .exit import ExitCommand -from .help import HelpCommand -from .interrupt import InterruptCommand -from .models import ModelCommand -from .ping import PingCommand -from .prompts import PromptsCommand -from .providers import ProviderCommand -from .resources import ResourcesCommand -from .servers import ServersCommand -from .theme import ThemeCommand -from .token import TokenCommand -from .tool_history import ToolHistoryCommand -from .tools import ToolsCommand -from .verbose import VerboseCommand - -__all__ = [ - "ClearCommand", - "ConversationCommand", - "ExitCommand", - "HelpCommand", - "InterruptCommand", - "ModelCommand", - "PingCommand", - "PromptsCommand", - "ProviderCommand", - "ResourcesCommand", - "ServersCommand", - "ThemeCommand", - "TokenCommand", - "ToolHistoryCommand", - "ToolsCommand", - "VerboseCommand", -] diff --git a/src/mcp_cli/commands/definitions/models.py b/src/mcp_cli/commands/definitions/models.py deleted file mode 100644 index 67a45f13..00000000 --- a/src/mcp_cli/commands/definitions/models.py +++ /dev/null @@ -1,274 +0,0 @@ -# src/mcp_cli/commands/definitions/model.py -""" -Unified model command implementation. 
-Uses the existing enhanced model commands from mcp_cli.commands.model -""" - -from __future__ import annotations - - -from mcp_cli.commands.base import ( - UnifiedCommand, - CommandGroup, - CommandParameter, - CommandResult, -) - - -class ModelCommand(CommandGroup): - """Model command group.""" - - def __init__(self): - super().__init__() - # Add subcommands - self.add_subcommand(ModelListCommand()) - self.add_subcommand(ModelSetCommand()) - self.add_subcommand(ModelShowCommand()) - - @property - def name(self) -> str: - return "models" - - @property - def aliases(self) -> list[str]: - return ["model"] - - @property - def description(self) -> str: - return "Manage LLM models" - - @property - def help_text(self) -> str: - return """ -Manage LLM models for the current provider. - -Subcommands: - list - List available models - set - Set the active model - show - Show current model - -Usage: - /model - Show current model and available models - /models - List all models (preferred) - /model - Switch to a different model - /model list - List all models (alternative) - /model refresh - Refresh model discovery - -Examples: - /model gpt-4o-mini - Switch to gpt-4o-mini - /model set gpt-4 - Explicitly set to gpt-4 - /model show - Show current model - /model list - List all available models -""" - - async def execute(self, subcommand: str | None = None, **kwargs) -> CommandResult: - """Execute the model command - handle direct model switching.""" - from mcp_cli.commands.actions.models import model_action_async - from mcp_cli.commands.models import ModelActionParams - - # Check if we have args (could be model name or subcommand) - args = kwargs.get("args", []) - - if not args: - # No arguments - show current model status - try: - from mcp_cli.commands.models import ModelActionParams - - params = ModelActionParams(args=[]) - await model_action_async(params) - return CommandResult(success=True) - except Exception as e: - return CommandResult( - success=False, error=f"Failed to 
show model status: {str(e)}" - ) - - # Check if the first arg is a known subcommand - first_arg = args[0] if isinstance(args, list) else str(args) - - # Known subcommands that should be handled by subcommand classes - if first_arg.lower() in [ - "list", - "ls", - "set", - "use", - "switch", - "show", - "current", - "status", - ]: - # Let the parent class handle the subcommand routing - return await super().execute(**kwargs) - - # Otherwise, treat it as a model name to switch to - try: - from mcp_cli.commands.models import ModelActionParams - - # Pass the model name directly to switch - model_args = args if isinstance(args, list) else [str(args)] - params = ModelActionParams(args=model_args) - await model_action_async(params) - - return CommandResult(success=True) - except Exception as e: - return CommandResult( - success=False, error=f"Failed to switch model: {str(e)}" - ) - - -class ModelListCommand(UnifiedCommand): - """List available models.""" - - @property - def name(self) -> str: - return "list" - - @property - def aliases(self) -> list[str]: - return ["ls"] - - @property - def description(self) -> str: - return "List available models for the current provider" - - @property - def parameters(self) -> list[CommandParameter]: - return [ - CommandParameter( - name="provider", - type=str, - required=False, - help="Provider to list models for (uses current if not specified)", - ), - CommandParameter( - name="detailed", - type=bool, - default=False, - help="Show detailed model information", - is_flag=True, - ), - ] - - async def execute(self, **kwargs) -> CommandResult: - """Execute the model list command.""" - # Import the existing model implementation - from mcp_cli.commands.actions.models import model_action_async - - try: - # Use the existing enhanced implementation - # Pass "list" as the command - from mcp_cli.commands.models import ModelActionParams - - params = ModelActionParams(args=["list"]) - await model_action_async(params) - - # The existing 
implementation handles all output directly - # Just return success - return CommandResult(success=True, data={"command": "model list"}) - - except Exception as e: - return CommandResult( - success=False, - error=f"Failed to list models: {str(e)}", - ) - - -class ModelSetCommand(UnifiedCommand): - """Set the active model.""" - - @property - def name(self) -> str: - return "set" - - @property - def aliases(self) -> list[str]: - return ["use", "switch"] - - @property - def description(self) -> str: - return "Set the active model" - - @property - def parameters(self) -> list[CommandParameter]: - return [ - CommandParameter( - name="model_name", - type=str, - required=True, - help="Name of the model to set", - ), - ] - - async def execute(self, **kwargs) -> CommandResult: - """Execute the model set command.""" - # Import the existing model implementation - from mcp_cli.commands.actions.models import model_action_async - - # Get model name - model_name = kwargs.get("model_name") - if not model_name and "args" in kwargs: - args_val = kwargs["args"] - if isinstance(args_val, list): - model_name = args_val[0] if args_val else None - elif isinstance(args_val, str): - model_name = args_val - - if not model_name: - return CommandResult( - success=False, - error="Model name is required. 
Usage: /model set ", - ) - - try: - from mcp_cli.commands.models import ModelActionParams - - # Use the existing enhanced implementation - # Pass the model name directly to switch to it - params = ModelActionParams(args=[model_name]) - await model_action_async(params) - - # The existing implementation handles all output directly - return CommandResult(success=True, data={"model": model_name}) - - except Exception as e: - return CommandResult( - success=False, - error=f"Failed to set model: {str(e)}", - ) - - -class ModelShowCommand(UnifiedCommand): - """Show current model.""" - - @property - def name(self) -> str: - return "show" - - @property - def aliases(self) -> list[str]: - return ["current", "status"] - - @property - def description(self) -> str: - return "Show the current active model" - - async def execute(self, **kwargs) -> CommandResult: - """Execute the model show command.""" - # Import the existing model implementation - from mcp_cli.commands.actions.models import model_action_async - - try: - # Use the existing enhanced implementation - # Pass no arguments to show current status - from mcp_cli.commands.models import ModelActionParams - - params = ModelActionParams(args=[]) - await model_action_async(params) - - # The existing implementation handles all output directly - return CommandResult(success=True, data={"command": "model show"}) - - except Exception as e: - return CommandResult( - success=False, - error=f"Failed to get model info: {str(e)}", - ) diff --git a/src/mcp_cli/commands/definitions/server_singular.py b/src/mcp_cli/commands/definitions/server_singular.py deleted file mode 100644 index 3eeec38f..00000000 --- a/src/mcp_cli/commands/definitions/server_singular.py +++ /dev/null @@ -1,90 +0,0 @@ -# src/mcp_cli/commands/definitions/server_singular.py -""" -Server command - manages MCP servers (add, remove, enable, disable) and shows server details. 
-Supports both project servers (server_config.json) and user servers (~/.mcp-cli/preferences.json). -""" - -from __future__ import annotations - - -from mcp_cli.commands.base import ( - UnifiedCommand, - CommandResult, -) - - -class ServerSingularCommand(UnifiedCommand): - """Manage MCP servers - add, remove, enable, disable, or show details.""" - - @property - def name(self) -> str: - return "server" - - @property - def aliases(self) -> list[str]: - return [] # No aliases for singular form - - @property - def description(self) -> str: - return "Manage MCP servers or show server details" - - @property - def help_text(self) -> str: - return """ -Manage MCP servers or show details about a specific server. - -Usage: - /server - List all servers - /server - Show server details - /server list - List all servers - /server list all - Include disabled servers - -Server Management: - /server add stdio [args...] - Add STDIO server - /server add --transport http - Add HTTP server - /server add --transport sse - Add SSE server - /server remove - Remove user-added server - /server enable - Enable disabled server - /server disable - Disable server - /server ping - Test server connectivity - -Examples: - /server - List all servers - /server sqlite - Show sqlite server details - /server add time stdio uvx mcp-server-time - Add time server - /server add myapi --transport http --header "Authorization: Bearer token" -- https://api.example.com - /server disable sqlite - Disable sqlite server - /server remove time - Remove time server - -Note: User-added servers persist in ~/.mcp-cli/preferences.json -""" - - async def execute(self, **kwargs) -> CommandResult: - """Execute the server command.""" - from mcp_cli.commands.actions.servers import servers_action_async - - # Get args - handle both string and list - args = kwargs.get("args", []) - if isinstance(args, str): - args = [args] - elif not args: - args = [] - - if not args: - # No args - show list of servers - from 
mcp_cli.commands.models import ServerActionParams - - await servers_action_async(ServerActionParams()) - return CommandResult(success=True) - - try: - from mcp_cli.commands.models import ServerActionParams - - # Pass args to the enhanced servers action which handles all management - params = ServerActionParams(args=args) - await servers_action_async(params) - return CommandResult(success=True) - except Exception as e: - return CommandResult( - success=False, error=f"Failed to execute server command: {str(e)}" - ) diff --git a/src/mcp_cli/commands/definitions/servers.py b/src/mcp_cli/commands/definitions/servers.py deleted file mode 100644 index ae76305b..00000000 --- a/src/mcp_cli/commands/definitions/servers.py +++ /dev/null @@ -1,119 +0,0 @@ -# src/mcp_cli/commands/definitions/servers.py -""" -Unified servers command implementation. - -This single implementation works across all modes (chat, CLI, interactive). -""" - -from __future__ import annotations - - -from mcp_cli.commands.base import ( - UnifiedCommand, - CommandParameter, - CommandResult, -) - - -class ServersCommand(UnifiedCommand): - """List and manage MCP servers.""" - - @property - def name(self) -> str: - return "servers" - - @property - def aliases(self) -> list[str]: - return [] - - @property - def description(self) -> str: - return "List connected MCP servers and their status" - - @property - def help_text(self) -> str: - return """ -List connected MCP servers and their status. 
- -Usage: - /servers - List all connected servers - /servers --detailed - Show detailed server information - /servers --ping - Test server connectivity - -Options: - --detailed - Show detailed server information - --format [table|json] - Output format (default: table) - --ping - Test server connectivity - -Examples: - /servers - Show server status table - /servers --detailed - Show full server details - /servers --ping - Check server connectivity - -Note: For server management (add/remove/enable/disable), use /server command -""" - - @property - def parameters(self) -> list[CommandParameter]: - return [ - CommandParameter( - name="detailed", - type=bool, - default=False, - help="Show detailed server information", - is_flag=True, - ), - CommandParameter( - name="format", - type=str, - default="table", - help="Output format", - choices=["table", "json"], - ), - CommandParameter( - name="ping", - type=bool, - default=False, - help="Test server connectivity", - is_flag=True, - ), - ] - - async def execute(self, **kwargs) -> CommandResult: - """Execute the servers command.""" - # Import the servers action and models - from mcp_cli.commands.actions.servers import servers_action_async - from mcp_cli.commands.models import ServerActionParams - - # Extract parameters for the existing implementation - detailed = kwargs.get("detailed", False) - show_raw = kwargs.get("raw", False) - ping_servers = kwargs.get("ping", False) - output_format = kwargs.get("format", "table") - - # Check if there are additional arguments for management commands - args = kwargs.get("args", []) - - try: - # Create Pydantic model from parameters - params = ServerActionParams( - args=args if args else [], - detailed=detailed, - show_capabilities=show_raw, - output_format=output_format, - ping_servers=ping_servers, - ) - - # Use the existing enhanced implementation - # It handles all the display internally - server_info = await servers_action_async(params) - - # The existing implementation handles all 
output directly via output.print - # Just return success - return CommandResult(success=True, data=server_info) - - except Exception as e: - return CommandResult( - success=False, - error=f"Failed to execute server command: {str(e)}", - ) diff --git a/src/mcp_cli/commands/definitions/themes_plural.py b/src/mcp_cli/commands/definitions/themes_plural.py deleted file mode 100644 index ca47c778..00000000 --- a/src/mcp_cli/commands/definitions/themes_plural.py +++ /dev/null @@ -1,56 +0,0 @@ -# src/mcp_cli/commands/definitions/themes_plural.py -""" -Plural themes command - lists all available themes. -""" - -from __future__ import annotations - - -from mcp_cli.commands.base import ( - UnifiedCommand, - CommandResult, -) - - -class ThemesPluralCommand(UnifiedCommand): - """List all available themes.""" - - @property - def name(self) -> str: - return "themes" - - @property - def aliases(self) -> list[str]: - return [] # No aliases - - @property - def description(self) -> str: - return "List all available themes" - - @property - def help_text(self) -> str: - return """ -List all available UI themes. 
- -Usage: - /themes - List all available themes - -Examples: - /themes - Show all themes with descriptions -""" - - async def execute(self, **kwargs) -> CommandResult: - """Execute the themes command.""" - from mcp_cli.commands.actions.theme import theme_action_async - - try: - # Always show the list - from mcp_cli.commands.models import ThemeActionParams - - params = ThemeActionParams() - await theme_action_async(params) # Empty args = show list - return CommandResult(success=True) - except Exception as e: - return CommandResult( - success=False, error=f"Failed to list themes: {str(e)}" - ) diff --git a/src/mcp_cli/commands/definitions/token.py b/src/mcp_cli/commands/definitions/token.py deleted file mode 100644 index 58cd0a84..00000000 --- a/src/mcp_cli/commands/definitions/token.py +++ /dev/null @@ -1,163 +0,0 @@ -# src/mcp_cli/commands/definitions/token.py -""" -Unified token command implementation. -""" - -from __future__ import annotations - - -from mcp_cli.commands.base import ( - UnifiedCommand, - CommandMode, - CommandResult, -) -from mcp_cli.constants import OAUTH_NAMESPACE, GENERIC_NAMESPACE - - -class TokenCommand(UnifiedCommand): - """Manage OAuth and authentication tokens.""" - - @property - def name(self) -> str: - return "token" - - @property - def aliases(self) -> list[str]: - return ["tokens"] - - @property - def description(self) -> str: - return "Manage OAuth and authentication tokens" - - @property - def help_text(self) -> str: - return """ -Manage OAuth and authentication tokens. 
- -Usage: - /token - List all stored tokens (chat/interactive mode) - /token list - List all stored tokens - /token set - Store a bearer token - /token get - Get details for a specific token - /token clear - Clear all tokens (with confirmation) - /token clear --force - Clear all tokens without confirmation - /token delete - Delete a specific token - -Examples: - /token # Show all tokens - /token list # Show all tokens - /token set my-api secret-token # Store a bearer token - /token get my-api # Show token details - /token get notion # Show notion OAuth token details - /token clear # Clear all tokens (asks for confirmation) - /token delete my-api # Delete the token -""" - - @property - def modes(self) -> CommandMode: - """Token is for chat and interactive modes.""" - return CommandMode.CHAT | CommandMode.INTERACTIVE - - @property - def requires_context(self) -> bool: - """Token needs tool manager context to get server list.""" - return True - - async def execute(self, **kwargs) -> CommandResult: - """Execute the token command.""" - from chuk_term.ui import output - from mcp_cli.commands.actions.token import ( - token_list_action_async, - token_clear_action_async, - token_delete_action_async, - token_get_action_async, - token_set_action_async, - ) - - # Get args and tool_manager from kwargs - args = kwargs.get("args", []) - if isinstance(args, str): - args = [args] - - tool_manager = kwargs.get("tool_manager") - server_names = tool_manager.servers if tool_manager else [] - - # Default action is list if no args provided - if not args or len(args) == 0: - from mcp_cli.commands.models import TokenListParams - - params = TokenListParams(server_names=server_names) - await token_list_action_async(params) - return CommandResult(success=True) - - # Parse subcommand (first arg) - subcommand = args[0].lower() - - if subcommand == "list": - from mcp_cli.commands.models import TokenListParams - - params = TokenListParams(server_names=server_names) - await 
token_list_action_async(params) - return CommandResult(success=True) - - elif subcommand == "clear": - # Check for --force flag - force = "--force" in args or "-f" in args - from mcp_cli.commands.models import TokenClearParams - - clear_params = TokenClearParams(force=force) - await token_clear_action_async(clear_params) - return CommandResult(success=True) - - elif subcommand == "set": - if len(args) < 3: - output.error("Token name and value required for set command") - output.hint("Usage: /token set ") - return CommandResult(success=False) - - token_name = args[1] - token_value = args[2] - # Store as bearer token in generic namespace - from mcp_cli.commands.models import TokenSetParams - - set_params = TokenSetParams( - name=token_name, - value=token_value, - token_type="bearer", - namespace=GENERIC_NAMESPACE, - ) - await token_set_action_async(set_params) - return CommandResult(success=True) - - elif subcommand == "get": - if len(args) < 2: - output.error("Token name required for get command") - output.hint("Usage: /token get ") - return CommandResult(success=False) - - token_name = args[1] - # Try OAuth namespace first, then generic - await token_get_action_async(token_name, namespace=OAUTH_NAMESPACE) - # If not found in OAuth, try generic - await token_get_action_async(token_name, namespace=GENERIC_NAMESPACE) - return CommandResult(success=True) - - elif subcommand == "delete": - if len(args) < 2: - output.error("Token name required for delete command") - output.hint("Usage: /token delete ") - return CommandResult(success=False) - - token_name = args[1] - # OAuth tokens are the most common use case in chat - from mcp_cli.commands.models import TokenDeleteParams - - delete_params = TokenDeleteParams(name=token_name, oauth=True) - await token_delete_action_async(delete_params) - return CommandResult(success=True) - - else: - output.error(f"Unknown token subcommand: {subcommand}") - output.hint("Available: list, set, get, clear, delete") - output.hint("Type /help 
token for more information") - return CommandResult(success=False) diff --git a/src/mcp_cli/commands/enums.py b/src/mcp_cli/commands/enums.py index 53fed574..8dcfa7a1 100644 --- a/src/mcp_cli/commands/enums.py +++ b/src/mcp_cli/commands/enums.py @@ -2,13 +2,27 @@ """ Enums and constants for command actions. -Centralized enums to replace hardcoded strings throughout the codebase. +NOTE: Core enums (TokenAction, ServerAction, ToolAction, OutputFormat, +TokenNamespace, etc.) are defined in mcp_cli.config.enums as the canonical +source. TransportType is in mcp_cli.tools.models. TokenSource is in +mcp_cli.commands.models.provider. + +This file contains command-specific enums that don't belong in config. """ from __future__ import annotations from enum import Enum +# Re-export canonical enums for convenience +from mcp_cli.config.enums import ( # noqa: F401 + OutputFormat, + TokenAction, + TokenNamespace, + ServerAction, + ToolAction, +) + class CommandAction(str, Enum): """Common command actions across different command types.""" @@ -32,32 +46,6 @@ class CommandAction(str, Enum): DIAGNOSTIC = "diagnostic" -class TokenNamespace(str, Enum): - """Token storage namespaces.""" - - GENERIC = "generic" - PROVIDER = "provider" - BEARER = "bearer" - API_KEY = "api-key" - OAUTH = "oauth" - - -class TransportType(str, Enum): - """Server transport types.""" - - STDIO = "stdio" - SSE = "sse" - HTTP = "http" - - -class OutputFormat(str, Enum): - """Output format types.""" - - JSON = "json" - TABLE = "table" - TEXT = "text" - - class ProviderCommand(str, Enum): """Provider-specific commands.""" @@ -70,14 +58,6 @@ class ProviderCommand(str, Enum): CUSTOM = "custom" -class TokenSource(str, Enum): - """Where a token comes from.""" - - ENV = "env" - STORAGE = "storage" - NONE = "none" - - class ServerCommand(str, Enum): """Server management commands.""" diff --git a/src/mcp_cli/commands/models/cmd.py b/src/mcp_cli/commands/models/cmd.py index da52d632..2c8222a2 100644 --- 
a/src/mcp_cli/commands/models/cmd.py +++ b/src/mcp_cli/commands/models/cmd.py @@ -136,4 +136,4 @@ class CmdActionParams(CommandBaseModel): single_turn: bool = Field( default=False, description="Disable multi-turn conversation" ) - max_turns: int = Field(default=30, description="Maximum conversation turns") + max_turns: int = Field(default=100, description="Maximum conversation turns") diff --git a/src/mcp_cli/commands/providers/__init__.py b/src/mcp_cli/commands/providers/__init__.py new file mode 100644 index 00000000..3f685e89 --- /dev/null +++ b/src/mcp_cli/commands/providers/__init__.py @@ -0,0 +1,11 @@ +"""Provider and model commands.""" + +from mcp_cli.commands.providers.providers import ProviderCommand +from mcp_cli.commands.providers.provider_singular import ProviderSingularCommand +from mcp_cli.commands.providers.models import ModelCommand + +__all__ = [ + "ProviderCommand", + "ProviderSingularCommand", + "ModelCommand", +] diff --git a/src/mcp_cli/commands/providers/models.py b/src/mcp_cli/commands/providers/models.py new file mode 100644 index 00000000..58f15489 --- /dev/null +++ b/src/mcp_cli/commands/providers/models.py @@ -0,0 +1,420 @@ +# src/mcp_cli/commands/definitions/model.py +""" +Unified model command implementation. 
+Uses the existing enhanced model commands from mcp_cli.commands.model +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from mcp_cli.commands.base import ( + UnifiedCommand, + CommandGroup, + CommandParameter, + CommandResult, +) + +if TYPE_CHECKING: + from mcp_cli.commands.models.model import ModelInfo + + +class ModelCommand(CommandGroup): + """Model command group.""" + + def __init__(self): + super().__init__() + # Add subcommands + self.add_subcommand(ModelListCommand()) + self.add_subcommand(ModelSetCommand()) + self.add_subcommand(ModelShowCommand()) + + @property + def name(self) -> str: + return "models" + + @property + def aliases(self) -> list[str]: + return ["model"] + + @property + def description(self) -> str: + return "Manage LLM models" + + @property + def help_text(self) -> str: + return """ +Manage LLM models for the current provider. + +Subcommands: + list - List available models + set - Set the active model + show - Show current model + +Usage: + /model - Show current model and available models + /models - List all models (preferred) + /model - Switch to a different model + /model list - List all models (alternative) + /model refresh - Refresh model discovery + +Examples: + /model gpt-4o-mini - Switch to gpt-4o-mini + /model set gpt-4 - Explicitly set to gpt-4 + /model show - Show current model + /model list - List all available models +""" + + async def execute(self, subcommand: str | None = None, **kwargs) -> CommandResult: + """Execute the model command - handle direct model switching.""" + from mcp_cli.context import get_context + from chuk_term.ui import output + + args = kwargs.get("args", []) + + # No arguments - delegate to list subcommand to show all models + if not args: + list_cmd = self.subcommands.get("list") + if list_cmd: + return await list_cmd.execute(**kwargs) + return CommandResult(success=False, error="List subcommand not available") + + first_arg = args[0] if isinstance(args, list) else 
str(args) + + # Known subcommands - let parent class handle routing + if first_arg.lower() in [ + "list", + "ls", + "set", + "use", + "switch", + "show", + "current", + "status", + ]: + return await super().execute(**kwargs) + + # Otherwise, treat it as a model name to switch to + try: + context = get_context() + if not context or not context.model_manager: + return CommandResult(success=False, error="No LLM manager available.") + + current_provider = context.model_manager.get_active_provider() + context.model_manager.switch_model(current_provider, first_arg) + output.success(f"Switched to model: {first_arg}") + + return CommandResult(success=True) + except Exception as e: + return CommandResult( + success=False, error=f"Failed to switch model: {str(e)}" + ) + + +class ModelListCommand(UnifiedCommand): + """List available models.""" + + @property + def name(self) -> str: + return "list" + + @property + def aliases(self) -> list[str]: + return ["ls"] + + @property + def description(self) -> str: + return "List available models for the current provider" + + @property + def parameters(self) -> list[CommandParameter]: + return [ + CommandParameter( + name="provider", + type=str, + required=False, + help="Provider to list models for (uses current if not specified)", + ), + CommandParameter( + name="detailed", + type=bool, + default=False, + help="Show detailed model information", + is_flag=True, + ), + ] + + async def execute(self, **kwargs) -> CommandResult: + """Execute the model list command.""" + from mcp_cli.context import get_context + from chuk_term.ui import output, format_table + + try: + context = get_context() + if not context or not context.model_manager: + return CommandResult(success=False, error="No LLM manager available.") + + current_provider = context.model_manager.get_active_provider() + current_model = context.model_manager.get_active_model() + + # Discover models for the current provider + model_infos = self._discover_models(current_provider, 
current_model) + + if not model_infos: + output.warning( + f"No models discovered for {current_provider}. " + "Check API key configuration." + ) + return CommandResult(success=True, data={"command": "model list"}) + + # Build table data from Pydantic models + table_data = [] + for model_info in model_infos: + table_data.append( + { + "": "✓" if model_info.is_current else "", + "Model": model_info.name, + } + ) + + # Display table + table = format_table( + table_data, + title=f"{len(model_infos)} Models for {current_provider}", + columns=["", "Model"], + ) + output.print_table(table) + + return CommandResult(success=True, data={"command": "model list"}) + + except Exception as e: + return CommandResult( + success=False, + error=f"Failed to list models: {str(e)}", + ) + + def _discover_models(self, provider: str, current_model: str) -> list["ModelInfo"]: + """Discover available models for a provider.""" + from mcp_cli.commands.models.model import ModelInfo + from mcp_cli.config import PROVIDER_OLLAMA + + # For Ollama, get actual running models from CLI + if provider.lower() == PROVIDER_OLLAMA: + model_names = self._get_ollama_models() + else: + # Get models from chuk_llm (already filters out placeholders) + model_names = self._get_provider_models(provider) + + # Convert to Pydantic ModelInfo objects + return [ + ModelInfo( + name=name, + provider=provider, + is_current=(name == current_model), + ) + for name in model_names + ] + + def _get_ollama_models(self) -> list[str]: + """Get models from Ollama CLI.""" + import subprocess + + try: + result = subprocess.run( + ["ollama", "list"], + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode == 0: + lines = result.stdout.strip().split("\n") + models = [] + for line in lines[1:]: # Skip header + if line.strip(): + parts = line.split() + if parts: + models.append(parts[0]) + return models + except Exception: + pass + return [] + + def _get_provider_models(self, provider: str) -> list[str]: + """Get 
models from chuk_llm for a provider. + + Tries multiple strategies: + 1. Get from chuk_llm's cached provider info + 2. If only "*" placeholder, call the provider's /models API endpoint + 3. Fall back to default_model if available + """ + try: + from chuk_llm.llm.client import list_available_providers + + providers_info = list_available_providers() + provider_info = providers_info.get(provider, {}) + + if isinstance(provider_info, dict): + models = provider_info.get( + "models", provider_info.get("available_models", []) + ) + model_list = list(models) if models else [] + + # Filter out placeholder "*" values + model_list = [m for m in model_list if m and m != "*"] + + # If only placeholder models, try calling the API + if not model_list and provider_info.get("has_api_key"): + api_base = provider_info.get("api_base") + if api_base: + model_list = self._fetch_models_from_api(provider, api_base) + + # Fall back to default_model if still empty + if not model_list: + default_model = provider_info.get("default_model") + if default_model: + model_list = [default_model] + + return model_list + except Exception: + pass + return [] + + def _fetch_models_from_api(self, provider: str, api_base: str) -> list[str]: + """Fetch models from provider's /models API endpoint. + + Works for OpenAI-compatible APIs (deepseek, openai, etc.) 
+ """ + import os + + try: + import httpx + + # Get API key from environment + api_key = os.environ.get(f"{provider.upper()}_API_KEY") + if not api_key: + return [] + + # Ensure api_base ends properly for /models endpoint + models_url = f"{api_base.rstrip('/')}/models" + + resp = httpx.get( + models_url, + headers={"Authorization": f"Bearer {api_key}"}, + timeout=5.0, + ) + + if resp.status_code == 200: + data = resp.json() + if isinstance(data, dict) and "data" in data: + # OpenAI-compatible format: {"data": [{"id": "model-name", ...}]} + return [m.get("id") for m in data["data"] if m.get("id")] + + except Exception: + pass + return [] + + +class ModelSetCommand(UnifiedCommand): + """Set the active model.""" + + @property + def name(self) -> str: + return "set" + + @property + def aliases(self) -> list[str]: + return ["use", "switch"] + + @property + def description(self) -> str: + return "Set the active model" + + @property + def parameters(self) -> list[CommandParameter]: + return [ + CommandParameter( + name="model_name", + type=str, + required=True, + help="Name of the model to set", + ), + ] + + async def execute(self, **kwargs) -> CommandResult: + """Execute the model set command.""" + from mcp_cli.context import get_context + from chuk_term.ui import output + + # Get model name + model_name = kwargs.get("model_name") + if not model_name and "args" in kwargs: + args_val = kwargs["args"] + if isinstance(args_val, list): + model_name = args_val[0] if args_val else None + elif isinstance(args_val, str): + model_name = args_val + + if not model_name: + return CommandResult( + success=False, + error="Model name is required. 
Usage: /model set ", + ) + + try: + context = get_context() + if not context or not context.model_manager: + return CommandResult(success=False, error="No LLM manager available.") + + current_provider = context.model_manager.get_active_provider() + context.model_manager.switch_model(current_provider, model_name) + output.success(f"Switched to model: {model_name}") + + return CommandResult(success=True, data={"model": model_name}) + + except Exception as e: + return CommandResult( + success=False, + error=f"Failed to set model: {str(e)}", + ) + + +class ModelShowCommand(UnifiedCommand): + """Show current model.""" + + @property + def name(self) -> str: + return "show" + + @property + def aliases(self) -> list[str]: + return ["current", "status"] + + @property + def description(self) -> str: + return "Show the current active model" + + async def execute(self, **kwargs) -> CommandResult: + """Execute the model show command.""" + from mcp_cli.context import get_context + from chuk_term.ui import output + + try: + context = get_context() + if not context or not context.model_manager: + return CommandResult(success=False, error="No LLM manager available.") + + current_model = context.model_manager.get_active_model() + current_provider = context.model_manager.get_active_provider() + + output.panel( + f"Provider: {current_provider}\nModel: {current_model}", + title="Current Model", + ) + + return CommandResult(success=True, data={"command": "model show"}) + + except Exception as e: + return CommandResult( + success=False, + error=f"Failed to get model info: {str(e)}", + ) diff --git a/src/mcp_cli/commands/definitions/provider_singular.py b/src/mcp_cli/commands/providers/provider_singular.py similarity index 62% rename from src/mcp_cli/commands/definitions/provider_singular.py rename to src/mcp_cli/commands/providers/provider_singular.py index 47a16a60..f9e94fbf 100644 --- a/src/mcp_cli/commands/definitions/provider_singular.py +++ 
b/src/mcp_cli/commands/providers/provider_singular.py @@ -44,7 +44,8 @@ def help_text(self) -> str: async def execute(self, **kwargs) -> CommandResult: """Execute the provider command.""" - from mcp_cli.commands.actions.providers import provider_action_async + from mcp_cli.context import get_context + from chuk_term.ui import output # Get args args = kwargs.get("args", []) @@ -52,10 +53,19 @@ async def execute(self, **kwargs) -> CommandResult: if not args: # No arguments - show current status (singular behavior) try: - from mcp_cli.commands.models import ProviderActionParams + context = get_context() + if not context or not context.model_manager: + return CommandResult( + success=False, error="No LLM manager available." + ) - params = ProviderActionParams(args=[]) - await provider_action_async(params) # Empty args = show status + current_provider = context.model_manager.get_active_provider() + current_model = context.model_manager.get_active_model() + + output.panel( + f"Provider: {current_provider}\nModel: {current_model}", + title="Current Provider Status", + ) return CommandResult(success=True) except Exception as e: return CommandResult( @@ -67,26 +77,23 @@ async def execute(self, **kwargs) -> CommandResult: # If it's a known subcommand, handle it if first_arg.lower() in ["list", "ls", "set"]: - # Delegate to the action - try: - from mcp_cli.commands.models import ProviderActionParams - - args_list = args if isinstance(args, list) else [str(args)] - params = ProviderActionParams(args=args_list) - await provider_action_async(params) - return CommandResult(success=True) - except Exception as e: - return CommandResult( - success=False, error=f"Command failed: {str(e)}" - ) + # These should be handled by the providers command group + return CommandResult( + success=False, + error=f"Use /providers {first_arg} for this command", + ) else: # Treat as provider name to switch to try: - from mcp_cli.commands.models import ProviderActionParams - - args_list = args if 
isinstance(args, list) else [str(args)] - params = ProviderActionParams(args=args_list) - await provider_action_async(params) + context = get_context() + if not context or not context.model_manager: + return CommandResult( + success=False, error="No LLM manager available." + ) + + provider_name = first_arg + context.model_manager.switch_provider(provider_name) + output.success(f"Switched to provider: {provider_name}") return CommandResult(success=True) except Exception as e: diff --git a/src/mcp_cli/commands/definitions/providers.py b/src/mcp_cli/commands/providers/providers.py similarity index 51% rename from src/mcp_cli/commands/definitions/providers.py rename to src/mcp_cli/commands/providers/providers.py index 032e55e2..7c92f8c2 100644 --- a/src/mcp_cli/commands/definitions/providers.py +++ b/src/mcp_cli/commands/providers/providers.py @@ -6,6 +6,7 @@ from __future__ import annotations +from typing import TYPE_CHECKING from mcp_cli.commands.base import ( UnifiedCommand, @@ -14,6 +15,9 @@ CommandResult, ) +if TYPE_CHECKING: + from mcp_cli.commands.models.provider import ProviderStatus + class ProviderCommand(CommandGroup): """Provider command group.""" @@ -89,41 +93,21 @@ def help_text(self) -> str: async def execute(self, subcommand: str | None = None, **kwargs) -> CommandResult: """Execute the provider command - handle direct provider switching.""" - from mcp_cli.commands.actions.providers import provider_action_async + from mcp_cli.context import get_context + from chuk_term.ui import output - # Check if we have args (could be provider name or subcommand) args = kwargs.get("args", []) + # No arguments - delegate to list subcommand to show providers with status if not args: - # No arguments - behavior depends on which command was used - # /providers (plural) -> list all providers - # /provider (singular) -> show current status - - # Try to determine which command was used (this is a heuristic) - # In chat context, we might not have this info, so default to 
status for /provider - # and list for /providers based on the primary command name + list_cmd = self.subcommands.get("list") + if list_cmd: + return await list_cmd.execute(**kwargs) + return CommandResult(success=False, error="List subcommand not available") - # Since this is the ProviderCommand class with name="providers", - # when called without args as /providers, show the list - # When called as /provider (alias), it will still come here but we default to list - # for consistency with /models behavior - - try: - # Default to list when no arguments (like /models does) - from mcp_cli.commands.models import ProviderActionParams - - params = ProviderActionParams(args=["list"]) - await provider_action_async(params) - return CommandResult(success=True) - except Exception as e: - return CommandResult( - success=False, error=f"Failed to list providers: {str(e)}" - ) - - # Check if the first arg is a known subcommand first_arg = args[0] if isinstance(args, list) else str(args) - # Known subcommands that should be handled by subcommand classes + # Known subcommands - let parent class handle routing if first_arg.lower() in [ "list", "ls", @@ -134,19 +118,17 @@ async def execute(self, subcommand: str | None = None, **kwargs) -> CommandResul "current", "status", ]: - # Let the parent class handle the subcommand routing return await super().execute(**kwargs) # Otherwise, treat it as a provider name to switch to try: - # Pass the provider name directly to switch - from mcp_cli.commands.models import ProviderActionParams - - if isinstance(args, list): - params = ProviderActionParams(args=args) - else: - params = ProviderActionParams(args=[str(args)]) - await provider_action_async(params) + context = get_context() + if not context or not context.model_manager: + return CommandResult(success=False, error="No LLM manager available.") + + context.model_manager.switch_provider(first_arg) + output.success(f"Switched to provider: {first_arg}") + return CommandResult(success=True) 
except Exception as e: return CommandResult( @@ -183,20 +165,65 @@ def parameters(self) -> list[CommandParameter]: async def execute(self, **kwargs) -> CommandResult: """Execute the provider list command.""" - # Import the existing provider implementation - from mcp_cli.commands.actions.providers import provider_action_async + from mcp_cli.context import get_context + from chuk_term.ui import output, format_table + from mcp_cli.commands.models.provider import ProviderData try: - # Use the existing enhanced implementation - # It handles all the display internally with rich formatting - # Pass "list" as the command - from mcp_cli.commands.models import ProviderActionParams + context = get_context() + if not context or not context.model_manager: + return CommandResult(success=False, error="No LLM manager available.") + + current_provider = context.model_manager.get_active_provider() + + # Get rich provider info from chuk_llm + providers: list[ProviderData] = [] + try: + from chuk_llm.llm.client import list_available_providers + + providers_dict = list_available_providers() + for name, info in providers_dict.items(): + if isinstance(info, dict) and "error" not in info: + providers.append( + ProviderData( + name=name, + has_api_key=info.get("has_api_key", False), + models=info.get("models", []), + available_models=info.get("available_models", []), + default_model=info.get("default_model"), + ) + ) + except Exception: + pass + + # Build table data with status info + table_data = [] + for provider in providers: + is_current = "✓" if provider.name == current_provider else "" + status = self._get_provider_status(provider) + + # Get default model for display + default_model = provider.default_model or "-" + if default_model and len(default_model) > 25: + default_model = default_model[:22] + "..." 
+ + table_data.append( + { + "": is_current, + "Provider": provider.name, + "Default Model": default_model, + "Status": f"{status.icon} {status.text}", + } + ) - params = ProviderActionParams(args=["list"]) - await provider_action_async(params) + # Display table + table = format_table( + table_data, + title=f"{len(table_data)} Available Providers", + columns=["", "Provider", "Default Model", "Status"], + ) + output.print_table(table) - # The existing implementation handles all output directly - # Just return success return CommandResult(success=True, data={"command": "provider list"}) except Exception as e: @@ -205,6 +232,59 @@ async def execute(self, **kwargs) -> CommandResult: error=f"Failed to list providers: {str(e)}", ) + def _get_provider_status(self, provider) -> "ProviderStatus": + """Get the status for a provider.""" + from mcp_cli.commands.models.provider import ProviderStatus + from mcp_cli.config import PROVIDER_OLLAMA + + # Special handling for Ollama (no API key needed) + if provider.name.lower() == PROVIDER_OLLAMA: + try: + import subprocess + + result = subprocess.run( + ["ollama", "list"], + capture_output=True, + text=True, + timeout=2, + ) + if result.returncode == 0: + lines = result.stdout.strip().split("\n") + model_count = len([line for line in lines[1:] if line.strip()]) + return ProviderStatus( + icon="✅", + text=f"Running ({model_count} models)", + reason="Ollama server is responding", + ) + return ProviderStatus( + icon="❌", text="Not running", reason="Ollama server not responding" + ) + except Exception: + return ProviderStatus( + icon="❌", text="Not available", reason="Ollama not installed" + ) + + # Standard providers + if provider.has_api_key: + model_count = provider.model_count + if model_count > 0: + return ProviderStatus( + icon="✅", + text=f"Configured ({model_count} models)", + reason="API key set and models available", + ) + return ProviderStatus( + icon="⚠️", + text="API key set", + reason="No models discovered yet", + ) + + 
return ProviderStatus( + icon="❌", + text="No API key", + reason=f"Set {provider.name.upper()}_API_KEY environment variable", + ) + class ProviderSetCommand(UnifiedCommand): """Set the active provider.""" @@ -234,8 +314,8 @@ def parameters(self) -> list[CommandParameter]: async def execute(self, **kwargs) -> CommandResult: """Execute the provider set command.""" - # Import the existing provider implementation - from mcp_cli.commands.actions.providers import provider_action_async + from mcp_cli.context import get_context + from chuk_term.ui import output # Get provider name provider_name = kwargs.get("provider_name") @@ -253,14 +333,13 @@ async def execute(self, **kwargs) -> CommandResult: ) try: - # Use the existing enhanced implementation - # Pass the provider name directly to switch to it - from mcp_cli.commands.models import ProviderActionParams + context = get_context() + if not context or not context.model_manager: + return CommandResult(success=False, error="No LLM manager available.") - params = ProviderActionParams(args=[provider_name]) - await provider_action_async(params) + context.model_manager.switch_provider(provider_name) + output.success(f"Switched to provider: {provider_name}") - # The existing implementation handles all output directly return CommandResult(success=True, data={"provider": provider_name}) except Exception as e: @@ -287,18 +366,22 @@ def description(self) -> str: async def execute(self, **kwargs) -> CommandResult: """Execute the provider show command.""" - # Import the existing provider implementation - from mcp_cli.commands.actions.providers import provider_action_async + from mcp_cli.context import get_context + from chuk_term.ui import output try: - from mcp_cli.commands.models import ProviderActionParams + context = get_context() + if not context or not context.model_manager: + return CommandResult(success=False, error="No LLM manager available.") + + current_provider = context.model_manager.get_active_provider() + current_model = 
context.model_manager.get_active_model() - # Use the existing enhanced implementation - # Pass no arguments to show current status - params = ProviderActionParams(args=[]) - await provider_action_async(params) + output.panel( + f"Provider: {current_provider}\nModel: {current_model}", + title="Current Provider", + ) - # The existing implementation handles all output directly return CommandResult(success=True, data={"command": "provider show"}) except Exception as e: diff --git a/src/mcp_cli/commands/registry.py b/src/mcp_cli/commands/registry.py index 073f15af..4c6de64e 100644 --- a/src/mcp_cli/commands/registry.py +++ b/src/mcp_cli/commands/registry.py @@ -23,7 +23,6 @@ class UnifiedCommandRegistry: _instance: UnifiedCommandRegistry | None = None _commands: dict[str, UnifiedCommand] = {} - _groups: dict[str, CommandGroup] = {} _initialized: bool = False def __new__(cls): @@ -36,7 +35,6 @@ def __init__(self): """Initialize the registry.""" if not self._initialized: self._commands = {} - self._groups = {} self._initialized = True def register(self, command: UnifiedCommand, group: str | None = None) -> None: @@ -45,28 +43,21 @@ def register(self, command: UnifiedCommand, group: str | None = None) -> None: Args: command: The command to register. - group: Optional group name (e.g., 'tools' for 'tools list'). + group: Deprecated, ignored. Groups register as regular commands + and dispatch subcommands via CommandGroup.get(). 
""" if group: - # Register as a subcommand in a group - if group not in self._groups: - # Groups must be pre-registered, cannot create them dynamically - logger.warning( - f"Group '{group}' not found for command '{command.name}'" - ) - return - - self._groups[group].add_subcommand(command) - logger.debug(f"Registered subcommand: {group} {command.name}") - else: - # Register as a top-level command - self._commands[command.name] = command - - # Also register aliases - for alias in command.aliases: - self._commands[alias] = command - - logger.debug(f"Registered command: {command.name}") + logger.debug( + f"Ignoring group='{group}' for '{command.name}' (groups are auto-dispatched)" + ) + + self._commands[command.name] = command + + # Also register aliases + for alias in command.aliases: + self._commands[alias] = command + + logger.debug(f"Registered command: {command.name}") def get(self, name: str, mode: CommandMode | None = None) -> UnifiedCommand | None: """ @@ -171,7 +162,6 @@ def get_command_names( def clear(self) -> None: """Clear all registered commands (useful for testing).""" self._commands.clear() - self._groups.clear() @classmethod def reset(cls) -> None: diff --git a/src/mcp_cli/commands/resources/__init__.py b/src/mcp_cli/commands/resources/__init__.py new file mode 100644 index 00000000..1dbb903e --- /dev/null +++ b/src/mcp_cli/commands/resources/__init__.py @@ -0,0 +1,9 @@ +"""Resource management commands.""" + +from mcp_cli.commands.resources.resources import ResourcesCommand +from mcp_cli.commands.resources.prompts import PromptsCommand + +__all__ = [ + "ResourcesCommand", + "PromptsCommand", +] diff --git a/src/mcp_cli/commands/definitions/prompts.py b/src/mcp_cli/commands/resources/prompts.py similarity index 64% rename from src/mcp_cli/commands/definitions/prompts.py rename to src/mcp_cli/commands/resources/prompts.py index bfb279d1..ea9943f0 100644 --- a/src/mcp_cli/commands/definitions/prompts.py +++ b/src/mcp_cli/commands/resources/prompts.py @@ 
-77,20 +77,45 @@ def parameters(self) -> list[CommandParameter]: async def execute(self, **kwargs) -> CommandResult: """Execute the prompts command.""" - # Import the existing prompts implementation - from mcp_cli.commands.actions.prompts import prompts_action_async + from mcp_cli.context import get_context + from chuk_term.ui import format_table try: - # Use the existing enhanced implementation - # It handles all the display internally - prompts = await prompts_action_async() - - # Add count info if we have prompts - if prompts: - output.print(f"\nTotal prompts: {len(prompts)}") + # Get context and tool manager + context = get_context() + if not context or not context.tool_manager: + return CommandResult( + success=False, + error="No tool manager available. Please connect to a server first.", + ) + + # Get prompts from tool manager + prompts = await context.tool_manager.list_prompts() + + if not prompts: + return CommandResult( + success=True, + output="No prompts available.", + ) + + # Build table data + table_data = [] + for prompt in prompts: + table_data.append( + { + "Name": prompt.name, + "Description": prompt.description or "No description", + } + ) + + # Display table + table = format_table( + table_data, + title=f"{len(prompts)} Available Prompts", + columns=["Name", "Description"], + ) + output.print_table(table) - # The existing implementation handles all output directly - # Just return success return CommandResult(success=True, data=prompts) except Exception as e: diff --git a/src/mcp_cli/commands/definitions/resources.py b/src/mcp_cli/commands/resources/resources.py similarity index 58% rename from src/mcp_cli/commands/definitions/resources.py rename to src/mcp_cli/commands/resources/resources.py index 7abdcab7..3c3ea0b0 100644 --- a/src/mcp_cli/commands/definitions/resources.py +++ b/src/mcp_cli/commands/resources/resources.py @@ -76,18 +76,52 @@ def parameters(self) -> list[CommandParameter]: async def execute(self, **kwargs) -> CommandResult: 
"""Execute the resources command.""" - # Import the resources action from the actions module - from mcp_cli.commands.actions.resources import resources_action_async + from mcp_cli.context import get_context + from chuk_term.ui import output, format_table - # The existing implementation doesn't take parameters, - # so we'll use it as-is try: - # Use the existing implementation - # It handles all the display internally - resources = await resources_action_async() + # Get context and tool manager + context = get_context() + if not context or not context.tool_manager: + return CommandResult( + success=False, + error="No tool manager available. Please connect to a server first.", + ) + + # Get resources from tool manager + resources = await context.tool_manager.list_resources() + + if not resources: + return CommandResult( + success=True, + output="No resources available.", + ) + + # Build table data + table_data = [] + for resource in resources: + # ResourceInfo has id, name, type, and extra dict + # URI might be in extra or id field + uri = resource.id or resource.extra.get("uri", "unknown") + name = resource.name or "Unnamed" + type_val = resource.type or resource.extra.get("mime_type", "unknown") + + table_data.append( + { + "URI": uri, + "Name": name, + "Type": type_val, + } + ) + + # Display table + table = format_table( + table_data, + title=f"{len(resources)} Available Resources", + columns=["URI", "Name", "Type"], + ) + output.print_table(table) - # The existing implementation handles all output directly - # Just return success return CommandResult(success=True, data=resources) except Exception as e: diff --git a/src/mcp_cli/commands/servers/__init__.py b/src/mcp_cli/commands/servers/__init__.py new file mode 100644 index 00000000..12c5e9b6 --- /dev/null +++ b/src/mcp_cli/commands/servers/__init__.py @@ -0,0 +1,11 @@ +"""Server management commands.""" + +from mcp_cli.commands.servers.servers import ServersCommand +from mcp_cli.commands.servers.server_singular 
import ServerSingularCommand +from mcp_cli.commands.servers.ping import PingCommand + +__all__ = [ + "ServersCommand", + "ServerSingularCommand", + "PingCommand", +] diff --git a/src/mcp_cli/commands/definitions/ping.py b/src/mcp_cli/commands/servers/ping.py similarity index 71% rename from src/mcp_cli/commands/definitions/ping.py rename to src/mcp_cli/commands/servers/ping.py index 60358b4d..d97161a3 100644 --- a/src/mcp_cli/commands/definitions/ping.py +++ b/src/mcp_cli/commands/servers/ping.py @@ -75,8 +75,7 @@ def parameters(self) -> list[CommandParameter]: async def execute(self, **kwargs) -> CommandResult: """Execute the ping command.""" - # Import the ping action from the actions module - from mcp_cli.commands.actions.ping import ping_action_async + from chuk_term.ui import output # Get tool manager tool_manager = kwargs.get("tool_manager") @@ -110,8 +109,38 @@ async def execute(self, **kwargs) -> CommandResult: targets = [str(server_index)] try: - # Use the existing ping action - success = await ping_action_async(tool_manager, targets=targets) + # Get server information + servers = await tool_manager.get_server_info() + + if not servers: + return CommandResult( + success=False, + output="No servers available to ping.", + ) + + # Ping each server + output.info("Pinging servers...") + success = True + + for server in servers: + # Skip if filtering by targets and this server doesn't match + if ( + targets + and server.name not in targets + and str(servers.index(server)) not in targets + ): + continue + + try: + # Try to ping the server (check if it's connected) + if server.connected: + output.success(f"✓ {server.name}: Connected") + else: + output.error(f"✗ {server.name}: Disconnected") + success = False + except Exception as e: + output.error(f"✗ {server.name}: Error - {str(e)}") + success = False return CommandResult(success=success) diff --git a/src/mcp_cli/commands/servers/server_singular.py b/src/mcp_cli/commands/servers/server_singular.py new file mode 
100644 index 00000000..e0f29ced --- /dev/null +++ b/src/mcp_cli/commands/servers/server_singular.py @@ -0,0 +1,192 @@ +# src/mcp_cli/commands/definitions/server_singular.py +""" +Server command - manages MCP servers (add, remove, enable, disable) and shows server details. +Supports both project servers (server_config.json) and user servers (~/.mcp-cli/preferences.json). +""" + +from __future__ import annotations + + +from mcp_cli.commands.base import ( + UnifiedCommand, + CommandResult, +) + + +class ServerSingularCommand(UnifiedCommand): + """Manage MCP servers - add, remove, enable, disable, or show details.""" + + @property + def name(self) -> str: + return "server" + + @property + def aliases(self) -> list[str]: + return [] # No aliases for singular form + + @property + def description(self) -> str: + return "Manage MCP servers or show server details" + + @property + def help_text(self) -> str: + return """ +Manage MCP servers or show details about a specific server. + +Usage: + /server - List all servers + /server - Show server details + /server list - List all servers + /server list all - Include disabled servers + +Server Management: + /server add stdio [args...] 
- Add STDIO server + /server add --transport http - Add HTTP server + /server add --transport sse - Add SSE server + /server remove - Remove user-added server + /server enable - Enable disabled server + /server disable - Disable server + /server ping - Test server connectivity + +Examples: + /server - List all servers + /server sqlite - Show sqlite server details + /server add time stdio uvx mcp-server-time - Add time server + /server add myapi --transport http --header "Authorization: Bearer token" -- https://api.example.com + /server disable sqlite - Disable sqlite server + /server remove time - Remove time server + +Note: User-added servers persist in ~/.mcp-cli/preferences.json +""" + + async def execute(self, **kwargs) -> CommandResult: + """Execute the server command.""" + from mcp_cli.context import get_context + from chuk_term.ui import output, format_table + + # Get args - handle both string and list + args = kwargs.get("args", []) + if isinstance(args, str): + args = [args] + elif not args: + args = [] + + # Get context and tool manager + context = get_context() + if not context or not context.tool_manager: + return CommandResult( + success=False, + error="No tool manager available. 
Please connect to a server first.", + ) + + if not args: + # No args - show list of servers + try: + servers = await context.tool_manager.get_server_info() + + if not servers: + return CommandResult( + success=True, + output="No servers connected.", + ) + + # Build table data + table_data = [] + for server in servers: + table_data.append( + { + "Server": server.name, + "Status": "✓ Connected" + if server.connected + else "✗ Disconnected", + "Tools": str(server.tool_count), + } + ) + + # Display table + table = format_table( + table_data, + title=f"{len(servers)} Servers", + columns=["Server", "Status", "Tools"], + ) + output.print_table(table) + + return CommandResult(success=True) + except Exception as e: + return CommandResult( + success=False, error=f"Failed to list servers: {str(e)}" + ) + + # Has arguments - handle server management commands + first_arg = args[0].lower() + + if first_arg == "list": + # List all servers + try: + servers = await context.tool_manager.get_server_info() + # Same as no args case + if not servers: + return CommandResult(success=True, output="No servers connected.") + + table_data = [] + for server in servers: + table_data.append( + { + "Server": server.name, + "Status": "✓ Connected" + if server.connected + else "✗ Disconnected", + "Tools": str(server.tool_count), + } + ) + + table = format_table( + table_data, + title=f"{len(servers)} Servers", + columns=["Server", "Status", "Tools"], + ) + output.print_table(table) + + return CommandResult(success=True) + except Exception as e: + return CommandResult( + success=False, error=f"Failed to list servers: {str(e)}" + ) + + elif first_arg in ["add", "remove", "enable", "disable", "ping"]: + # These commands need more complex implementation + return CommandResult( + success=False, + error=f"Server {first_arg} command not yet implemented in this version.", + ) + else: + # Treat as server name - show server details + server_name = first_arg + try: + servers = await 
context.tool_manager.get_server_info() + from mcp_cli.tools.models import ServerInfo + + found_server: ServerInfo | None = next( + (s for s in servers if s.name == server_name), None + ) + + if not found_server: + return CommandResult( + success=False, + error=f"Server '{server_name}' not found.", + ) + + # Show server details + output.panel( + f"Name: {found_server.name}\n" + f"Status: {'Connected' if found_server.connected else 'Disconnected'}\n" + f"Tools: {found_server.tool_count}\n" + f"Transport: {found_server.transport.value if found_server.transport else 'unknown'}", + title=f"Server: {found_server.name}", + ) + + return CommandResult(success=True) + except Exception as e: + return CommandResult( + success=False, error=f"Failed to get server details: {str(e)}" + ) diff --git a/src/mcp_cli/commands/servers/servers.py b/src/mcp_cli/commands/servers/servers.py new file mode 100644 index 00000000..96c97188 --- /dev/null +++ b/src/mcp_cli/commands/servers/servers.py @@ -0,0 +1,202 @@ +# src/mcp_cli/commands/definitions/servers.py +""" +Unified servers command implementation. + +This single implementation works across all modes (chat, CLI, interactive). +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from mcp_cli.commands.base import ( + UnifiedCommand, + CommandParameter, + CommandResult, +) +from mcp_cli.config.enums import OutputFormat + +if TYPE_CHECKING: + from mcp_cli.commands.models.server import ServerStatusInfo + + +class ServersCommand(UnifiedCommand): + """List and manage MCP servers.""" + + @property + def name(self) -> str: + return "servers" + + @property + def aliases(self) -> list[str]: + return [] + + @property + def description(self) -> str: + return "List connected MCP servers and their status" + + @property + def help_text(self) -> str: + return """ +List connected MCP servers and their status. 
+ +Usage: + /servers - List all connected servers + /servers --detailed - Show detailed server information + /servers --ping - Test server connectivity + +Options: + --detailed - Show detailed server information + --format [table|json] - Output format (default: table) + --ping - Test server connectivity + +Examples: + /servers - Show server status table + /servers --detailed - Show full server details + /servers --ping - Check server connectivity + +Note: For server management (add/remove/enable/disable), use /server command +""" + + @property + def parameters(self) -> list[CommandParameter]: + return [ + CommandParameter( + name="detailed", + type=bool, + default=False, + help="Show detailed server information", + is_flag=True, + ), + CommandParameter( + name="format", + type=str, + default="table", + help="Output format", + choices=["table", "json"], + ), + CommandParameter( + name="ping", + type=bool, + default=False, + help="Test server connectivity", + is_flag=True, + ), + ] + + async def execute(self, **kwargs) -> CommandResult: + """Execute the servers command.""" + import json + + from mcp_cli.context import get_context + from chuk_term.ui import output, format_table + + # Extract parameters + _ = kwargs.get("detailed", False) # Reserved for future use + ping_servers = kwargs.get("ping", False) + output_format = kwargs.get("format", "table") + + # Get context and tool manager + context = get_context() + if not context or not context.tool_manager: + return CommandResult( + success=False, + error="No tool manager available. 
Please connect to a server first.", + ) + + try: + # Get server information (Pydantic ServerInfo models) + servers = await context.tool_manager.get_server_info() + + if not servers: + return CommandResult( + success=True, + output="No servers connected.", + ) + + # Output as JSON if requested + if output_format == OutputFormat.JSON: + server_data = [s.model_dump() for s in servers] + output.print(json.dumps(server_data, indent=2, default=str)) + return CommandResult(success=True, data=server_data) + + # Build table rows from Pydantic models + table_data = [] + for server in servers: + status = self._get_server_status(server) + + # Get connection info based on transport type + connection = self._get_connection_info(server) + + row = { + "Server": server.name, + "Type": server.transport.value.upper(), + "Status": f"{status.icon} {status.status}", + "Tools": str(server.tool_count), + "Connection": connection, + } + + table_data.append(row) + + # Display table with all columns + columns = ["Server", "Type", "Status", "Tools", "Connection"] + + table = format_table( + table_data, + title=f"{len(servers)} Connected Servers", + columns=columns, + ) + output.print_table(table) + + if ping_servers: + output.info("\n🏓 Pinging servers...") + output.hint("Use /ping for detailed connectivity testing") + + return CommandResult(success=True) + + except Exception as e: + return CommandResult( + success=False, + error=f"Failed to list servers: {str(e)}", + ) + + def _get_server_status(self, server) -> "ServerStatusInfo": + """Get status info for a server.""" + from mcp_cli.commands.models.server import ServerStatusInfo + + if server.connected: + return ServerStatusInfo( + icon="✅", + status="Connected", + reason="Server is online and responding", + ) + return ServerStatusInfo( + icon="❌", + status="Disconnected", + reason="Server is not responding", + ) + + def _get_connection_info(self, server) -> str: + """Get connection info string for display.""" + from mcp_cli.tools.models 
import TransportType + + if server.transport == TransportType.STDIO: + if server.command: + # Show command with truncated args + cmd = str(server.command) + if server.args: + args_str = " ".join(str(a) for a in server.args[:2]) + if len(server.args) > 2: + args_str += " ..." + return f"{cmd} {args_str}" + return cmd + return "stdio" + elif server.transport in (TransportType.HTTP, TransportType.SSE): + if server.url: + # Truncate long URLs + url = str(server.url) + if len(url) > 40: + return url[:37] + "..." + return url + return str(server.transport.value) + return "unknown" diff --git a/src/mcp_cli/commands/theme/__init__.py b/src/mcp_cli/commands/theme/__init__.py new file mode 100644 index 00000000..8a4dc0d1 --- /dev/null +++ b/src/mcp_cli/commands/theme/__init__.py @@ -0,0 +1,11 @@ +"""Theme management commands.""" + +from mcp_cli.commands.theme.theme_singular import ThemeSingularCommand +from mcp_cli.commands.theme.themes_plural import ThemesPluralCommand +from mcp_cli.commands.theme.theme import ThemeCommand + +__all__ = [ + "ThemeSingularCommand", + "ThemesPluralCommand", + "ThemeCommand", +] diff --git a/src/mcp_cli/commands/definitions/theme.py b/src/mcp_cli/commands/theme/theme.py similarity index 77% rename from src/mcp_cli/commands/definitions/theme.py rename to src/mcp_cli/commands/theme/theme.py index b5619e2f..0dc3203e 100644 --- a/src/mcp_cli/commands/definitions/theme.py +++ b/src/mcp_cli/commands/theme/theme.py @@ -84,7 +84,7 @@ def modes(self) -> CommandMode: async def execute(self, **kwargs) -> CommandResult: """Execute the theme command.""" from mcp_cli.utils.preferences import get_preference_manager - from mcp_cli.commands.actions.theme import _interactive_theme_selection + from chuk_term.ui import output pref_manager = get_preference_manager() theme_name = kwargs.get("theme_name") @@ -122,33 +122,34 @@ async def execute(self, **kwargs) -> CommandResult: # Save preference pref_manager.set_theme(theme_name) + output.success(f"Theme changed to: 
{theme_name}") + return CommandResult( success=True, output=f"Theme changed to: {theme_name}", ) else: - # Show interactive theme selector - try: - await _interactive_theme_selection(pref_manager) - return CommandResult(success=True) - except Exception: - # Fallback to showing current theme - current_theme = pref_manager.get_theme() - available_themes = [ - "default", - "dark", - "light", - "minimal", - "terminal", - "monokai", - "dracula", - "solarized", - ] + # Show current theme and available themes + current_theme = pref_manager.get_theme() + available_themes = [ + "default", + "dark", + "light", + "minimal", + "terminal", + "monokai", + "dracula", + "solarized", + ] - output_text = f"Current theme: {current_theme}\n" - output_text += f"Available themes: {', '.join(available_themes)}" + output.panel( + f"Current theme: {current_theme}\n\n" + f"Available themes:\n" + + "\n".join(f" - {t}" for t in available_themes), + title="Themes", + ) - return CommandResult( - success=True, - output=output_text, - ) + return CommandResult( + success=True, + output=f"Current theme: {current_theme}", + ) diff --git a/src/mcp_cli/commands/definitions/theme_singular.py b/src/mcp_cli/commands/theme/theme_singular.py similarity index 68% rename from src/mcp_cli/commands/definitions/theme_singular.py rename to src/mcp_cli/commands/theme/theme_singular.py index b7a0b116..34bf2f5f 100644 --- a/src/mcp_cli/commands/definitions/theme_singular.py +++ b/src/mcp_cli/commands/theme/theme_singular.py @@ -45,9 +45,9 @@ def help_text(self) -> str: async def execute(self, **kwargs) -> CommandResult: """Execute the theme command.""" - from mcp_cli.commands.actions.theme import theme_action_async from chuk_term.ui import output - from chuk_term.ui.theme import get_theme + from chuk_term.ui.theme import get_theme, set_theme + from mcp_cli.utils.preferences import get_preference_manager # Get args args = kwargs.get("args", []) @@ -56,11 +56,13 @@ async def execute(self, **kwargs) -> CommandResult: 
# No arguments - show current theme with preview try: current_theme = get_theme() + pref_manager = get_preference_manager() + theme_name = pref_manager.get_theme() # Display current theme in a panel using theme defaults output.panel( - f"ℹ Current theme: {current_theme.name}\n" - f"ℹ Description: {current_theme.description if hasattr(current_theme, 'description') else 'No description'}", + f"Current theme: {theme_name}\n" + f"Description: {getattr(current_theme, 'description', 'No description')}", title="Theme Status", ) @@ -85,12 +87,34 @@ async def execute(self, **kwargs) -> CommandResult: else: # Has arguments - theme name to switch to try: - from mcp_cli.commands.models import ThemeActionParams - - # Get the theme name from args theme_name = args[0] if isinstance(args, list) else str(args) - params = ThemeActionParams(theme_name=theme_name) - await theme_action_async(params) + + # Available themes + available_themes = [ + "default", + "dark", + "light", + "minimal", + "terminal", + "monokai", + "dracula", + "solarized", + ] + + if theme_name not in available_themes: + return CommandResult( + success=False, + error=f"Invalid theme: {theme_name}. Available themes: {', '.join(available_themes)}", + ) + + # Apply theme + set_theme(theme_name) + + # Save preference + pref_manager = get_preference_manager() + pref_manager.set_theme(theme_name) + + output.success(f"Switched to theme: {theme_name}") return CommandResult(success=True) except Exception as e: return CommandResult( diff --git a/src/mcp_cli/commands/theme/themes_plural.py b/src/mcp_cli/commands/theme/themes_plural.py new file mode 100644 index 00000000..f2a74510 --- /dev/null +++ b/src/mcp_cli/commands/theme/themes_plural.py @@ -0,0 +1,89 @@ +# src/mcp_cli/commands/definitions/themes_plural.py +""" +Plural themes command - lists all available themes. 
+""" + +from __future__ import annotations + + +from mcp_cli.commands.base import ( + UnifiedCommand, + CommandResult, +) + + +class ThemesPluralCommand(UnifiedCommand): + """List all available themes.""" + + @property + def name(self) -> str: + return "themes" + + @property + def aliases(self) -> list[str]: + return [] # No aliases + + @property + def description(self) -> str: + return "List all available themes" + + @property + def help_text(self) -> str: + return """ +List all available UI themes. + +Usage: + /themes - List all available themes + +Examples: + /themes - Show all themes with descriptions +""" + + async def execute(self, **kwargs) -> CommandResult: + """Execute the themes command.""" + from chuk_term.ui import output, format_table + from mcp_cli.utils.preferences import get_preference_manager + + try: + pref_manager = get_preference_manager() + current_theme = pref_manager.get_theme() + + # Define available themes with descriptions + themes = [ + {"name": "default", "description": "Default balanced theme"}, + {"name": "dark", "description": "Dark mode theme"}, + {"name": "light", "description": "Light mode theme"}, + {"name": "minimal", "description": "Minimal styling"}, + {"name": "terminal", "description": "Classic terminal colors"}, + {"name": "monokai", "description": "Monokai color scheme"}, + {"name": "dracula", "description": "Dracula color scheme"}, + {"name": "solarized", "description": "Solarized color scheme"}, + ] + + # Build table data + table_data = [] + for theme in themes: + is_current = "✓" if theme["name"] == current_theme else "" + table_data.append( + { + "Current": is_current, + "Theme": theme["name"], + "Description": theme["description"], + } + ) + + # Display table + table = format_table( + table_data, + title=f"{len(themes)} Available Themes", + columns=["Current", "Theme", "Description"], + ) + output.print_table(table) + + output.tip("Use: /theme to switch themes") + + return CommandResult(success=True) + except Exception 
as e: + return CommandResult( + success=False, error=f"Failed to list themes: {str(e)}" + ) diff --git a/src/mcp_cli/commands/tokens/__init__.py b/src/mcp_cli/commands/tokens/__init__.py new file mode 100644 index 00000000..a3d875f8 --- /dev/null +++ b/src/mcp_cli/commands/tokens/__init__.py @@ -0,0 +1,7 @@ +"""Token management commands.""" + +from mcp_cli.commands.tokens.token import TokenCommand + +__all__ = [ + "TokenCommand", +] diff --git a/src/mcp_cli/commands/tokens/token.py b/src/mcp_cli/commands/tokens/token.py new file mode 100644 index 00000000..25b3daf0 --- /dev/null +++ b/src/mcp_cli/commands/tokens/token.py @@ -0,0 +1,942 @@ +# src/mcp_cli/commands/definitions/token.py +""" +Unified token command implementation with all sub-actions. +""" + +from __future__ import annotations + +import json + +from chuk_term.ui import output, format_table +from mcp_cli.auth import TokenManager, TokenStoreBackend, TokenStoreFactory +from mcp_cli.auth import APIKeyToken, BearerToken, TokenType +from mcp_cli.config.config_manager import get_config +from mcp_cli.config.enums import TokenNamespace +from mcp_cli.config import NAMESPACE, OAUTH_NAMESPACE, GENERIC_NAMESPACE +from mcp_cli.commands.base import ( + UnifiedCommand, + CommandMode, + CommandParameter, + CommandResult, +) +from mcp_cli.commands.models import ( + TokenListParams, + TokenSetParams, + TokenDeleteParams, + TokenClearParams, + TokenProviderParams, +) + + +def _get_token_manager() -> TokenManager: + """Get configured token manager instance with mcp-cli namespace.""" + import os + + # Check for CLI override first + backend_override = os.environ.get("MCP_CLI_TOKEN_BACKEND") + if backend_override: + try: + backend = TokenStoreBackend(backend_override) + except (ValueError, KeyError): + # Invalid backend specified, fall through to config + backend = None + else: + backend = None + + # If no override or invalid override, check config + if backend is None: + try: + config = get_config() + backend = 
TokenStoreBackend(config.token_store_backend) + except Exception: + backend = TokenStoreBackend.AUTO + + return TokenManager(backend=backend, namespace=NAMESPACE, service_name="mcp-cli") + + +class TokenCommand(UnifiedCommand): + """Manage OAuth and authentication tokens.""" + + @property + def name(self) -> str: + return "token" + + @property + def aliases(self) -> list[str]: + return ["tokens"] + + @property + def description(self) -> str: + return "Manage OAuth and authentication tokens" + + @property + def help_text(self) -> str: + return """ +Manage OAuth and authentication tokens. + +Usage: + /token - List all stored tokens (chat/interactive mode) + /token list - List all stored tokens + /token set - Store a bearer token + /token get - Get details for a specific token + /token clear - Clear all tokens (with confirmation) + /token clear --force - Clear all tokens without confirmation + /token delete - Delete a specific token + +Examples: + /token # Show all tokens + /token list # Show all tokens + /token set my-api secret-token # Store a bearer token + /token get my-api # Show token details + /token get notion # Show notion OAuth token details + /token clear # Clear all tokens (asks for confirmation) + /token delete my-api # Delete the token +""" + + @property + def parameters(self) -> list[CommandParameter]: + """Define parameters for token command.""" + return [ + CommandParameter( + name="action", + type=str, + required=False, + help="Action: list, set, get, delete, clear, backends, set-provider, get-provider, delete-provider", + ), + CommandParameter( + name="name", + type=str, + required=False, + help="Token/provider name", + ), + CommandParameter( + name="value", + type=str, + required=False, + help="Token value (for set action)", + ), + CommandParameter( + name="token_type", + type=str, + default="bearer", + help="Token type: bearer, api-key, generic", + ), + CommandParameter( + name="provider", + type=str, + required=False, + help="Provider name (for 
API keys)", + ), + CommandParameter( + name="namespace", + type=str, + required=False, + help="Storage namespace", + ), + CommandParameter( + name="show_oauth", + type=bool, + default=True, + help="Show OAuth tokens", + is_flag=True, + ), + CommandParameter( + name="show_bearer", + type=bool, + default=True, + help="Show bearer tokens", + is_flag=True, + ), + CommandParameter( + name="show_api_keys", + type=bool, + default=True, + help="Show API keys", + is_flag=True, + ), + CommandParameter( + name="show_providers", + type=bool, + default=True, + help="Show provider tokens", + is_flag=True, + ), + CommandParameter( + name="is_oauth", + type=bool, + default=False, + help="Delete OAuth token", + is_flag=True, + ), + CommandParameter( + name="force", + type=bool, + default=False, + help="Skip confirmation (clear)", + is_flag=True, + ), + CommandParameter( + name="api_key", + type=str, + required=False, + help="API key value (for set-provider)", + ), + ] + + @property + def modes(self) -> CommandMode: + """Token is available in all modes.""" + return CommandMode.CLI | CommandMode.CHAT | CommandMode.INTERACTIVE + + @property + def requires_context(self) -> bool: + """Token needs context for server list.""" + return True + + async def execute(self, **kwargs) -> CommandResult: + """Execute the token command with all sub-actions.""" + # Get action from kwargs + action = kwargs.get("action", "list") + + # Handle args array for chat/interactive mode + args = kwargs.get("args", []) + if isinstance(args, str): + args = [args] + elif args and len(args) > 0: + # First arg is the action in chat mode + action = args[0].lower() + + # Get tool_manager for server list + tool_manager = kwargs.get("tool_manager") + server_names = tool_manager.servers if tool_manager else [] + + # Route to appropriate sub-action + from mcp_cli.config import TokenAction + + try: + if not action or action == TokenAction.LIST.value: + return await self._action_list(kwargs, server_names) + elif action == 
TokenAction.SET.value: + return await self._action_set(kwargs, args) + elif action == TokenAction.GET.value: + return await self._action_get(kwargs, args) + elif action == TokenAction.DELETE.value: + return await self._action_delete(kwargs, args) + elif action == TokenAction.CLEAR.value: + return await self._action_clear(kwargs, args) + elif action == TokenAction.BACKENDS.value: + return await self._action_backends() + elif action == TokenAction.SET_PROVIDER.value: + return await self._action_set_provider(kwargs) + elif action == TokenAction.GET_PROVIDER.value: + return await self._action_get_provider(kwargs) + elif action == TokenAction.DELETE_PROVIDER.value: + return await self._action_delete_provider(kwargs) + else: + return CommandResult( + success=False, + error=f"Unknown token action: {action}. Valid actions: list, set, get, delete, clear, backends, set-provider, get-provider, delete-provider", + ) + except Exception as e: + return CommandResult(success=False, error=f"Token command error: {e}") + + async def _action_list( + self, kwargs: dict, server_names: list[str] + ) -> CommandResult: + """List all stored tokens.""" + params = TokenListParams( + namespace=kwargs.get("namespace"), + show_oauth=kwargs.get("show_oauth", True), + show_bearer=kwargs.get("show_bearer", True), + show_api_keys=kwargs.get("show_api_keys", True), + show_providers=kwargs.get("show_providers", True), + server_names=server_names, + ) + + try: + manager = _get_token_manager() + + output.rule("[bold]🔐 Stored Tokens[/bold]", style="primary") + + # Track if we showed any tokens at all + provider_tokens = {} + oauth_entries = [] + + # Show provider tokens with hierarchical status + if params.show_providers and ( + params.namespace is None or params.namespace == TokenNamespace.PROVIDER + ): + from mcp_cli.auth.provider_tokens import list_all_provider_tokens + + provider_tokens = list_all_provider_tokens(manager) + + if provider_tokens: + output.print( + "\n[bold]Provider API Keys (Stored in 
Secure Storage):[/bold]" + ) + provider_table_data = [] + + for provider_name, status_info in provider_tokens.items(): + env_var = status_info["env_var"] + status_display = "🔐 storage" + + if status_info["in_env"]: + note = f"(overridden by {env_var})" + else: + note = "active" + + provider_table_data.append( + { + "Provider": provider_name, + "Status": status_display, + "Env Var": env_var, + "Note": note, + } + ) + + provider_table = format_table( + provider_table_data, + title=None, + columns=["Provider", "Status", "Env Var", "Note"], + ) + output.print_table(provider_table) + output.info( + "💡 Environment variables take precedence over stored tokens" + ) + output.print() + + # List OAuth tokens + if params.show_oauth and params.server_names: + for server_name in params.server_names: + tokens = manager.load_tokens(server_name) + if tokens: + metadata = {} + if tokens.expires_in: + import time + + if tokens.issued_at: + metadata["expires_at"] = ( + tokens.issued_at + tokens.expires_in + ) + else: + metadata["expires_at"] = time.time() + tokens.expires_in + + oauth_entries.append( + { + "name": server_name, + "type": "oauth", + "namespace": OAUTH_NAMESPACE, + "registered_at": tokens.issued_at + if tokens.issued_at + else None, + "metadata": metadata, + } + ) + + if oauth_entries: + output.print("\n[bold]OAuth Tokens (Server Authentication):[/bold]") + oauth_table_data = [] + + for entry in oauth_entries: + import time + from datetime import datetime + + token_name = entry.get("name", "unknown") + token_type = entry.get("type", "unknown") + + registered_at = entry.get("registered_at") + created = "-" + if registered_at and isinstance(registered_at, (int, float)): + dt = datetime.fromtimestamp(registered_at) + created = dt.strftime("%Y-%m-%d") + + metadata_raw = entry.get("metadata", {}) + metadata = ( + metadata_raw if isinstance(metadata_raw, dict) else {} + ) + expires = metadata.get("expires_at", "-") + if expires != "-" and isinstance(expires, (int, float)): + 
exp_dt = datetime.fromtimestamp(expires) + if time.time() > expires: + expires = f"{exp_dt.strftime('%Y-%m-%d')} ⚠️ Expired" + else: + expires = exp_dt.strftime("%Y-%m-%d") + + oauth_table_data.append( + { + "Server": token_name, + "Type": token_type, + "Created": created, + "Expires": expires, + } + ) + + oauth_table = format_table( + oauth_table_data, + title=None, + columns=["Server", "Type", "Created", "Expires"], + ) + output.print_table(oauth_table) + output.info("💡 Use '/token get ' to view token details") + output.print() + elif params.show_oauth and not params.server_names: + output.info( + "No servers configured. OAuth tokens are stored per server." + ) + output.print() + + # List tokens from registry + registry = manager.registry + registered_tokens = registry.list_tokens(namespace=params.namespace) + + table_data = [] + for entry in registered_tokens: + token_type = entry.get("type", "unknown") + token_name = entry.get("name", "unknown") + token_namespace = entry.get("namespace", "unknown") + + if params.show_providers and token_namespace == TokenNamespace.PROVIDER: + continue + if params.show_oauth and token_namespace == OAUTH_NAMESPACE: + continue + + if token_type == TokenType.BEARER.value and not params.show_bearer: + continue + if token_type == TokenType.API_KEY.value and not params.show_api_keys: + continue + + import time + from datetime import datetime + + registered_at = entry.get("registered_at") + created = "-" + if registered_at and isinstance(registered_at, (int, float)): + dt = datetime.fromtimestamp(registered_at) + created = dt.strftime("%Y-%m-%d") + + metadata_raw = entry.get("metadata", {}) + metadata = metadata_raw if isinstance(metadata_raw, dict) else {} + expires = metadata.get("expires_at", "-") + if expires != "-" and isinstance(expires, (int, float)): + exp_dt = datetime.fromtimestamp(expires) + if time.time() > expires: + expires = f"{exp_dt.strftime('%Y-%m-%d')} ⚠️" + else: + expires = exp_dt.strftime("%Y-%m-%d") + + details = 
[] + if metadata.get("provider"): + details.append(f"provider={metadata['provider']}") + if token_namespace != TokenNamespace.GENERIC: + details.append(f"ns={token_namespace}") + + table_data.append( + { + "Type": token_type, + "Name": token_name, + "Created": created, + "Expires": expires, + "Details": ", ".join(details) if details else "-", + } + ) + + if table_data: + output.print("\n[bold]Other Tokens:[/bold]") + table = format_table( + table_data, + title=None, + columns=["Type", "Name", "Created", "Expires", "Details"], + ) + output.print_table(table) + elif not provider_tokens and not oauth_entries: + output.warning("No tokens found.") + + output.print() + output.tip("💡 Token Management:") + output.info(" • Store provider key: mcp-cli token set-provider ") + output.info( + " • Store bearer token: mcp-cli token set --type bearer" + ) + output.info(" • View: mcp-cli token get ") + output.info(" • Delete: mcp-cli token delete ") + + return CommandResult(success=True) + + except Exception as e: + return CommandResult(success=False, error=f"Error listing tokens: {e}") + + async def _action_set(self, kwargs: dict, args: list[str]) -> CommandResult: + """Store a token.""" + # Get parameters from kwargs or args + name = kwargs.get("name") + value = kwargs.get("value") + + # For chat mode, parse from args + if args and len(args) >= 3: + name = args[1] + value = args[2] + + if not name: + return CommandResult(success=False, error="Token name is required") + + params = TokenSetParams( + name=name, + value=value, + token_type=kwargs.get("token_type", "bearer"), + provider=kwargs.get("provider"), + namespace=kwargs.get("namespace") or GENERIC_NAMESPACE, + ) + + try: + manager = _get_token_manager() + store = manager.token_store + + # Prompt for value if not provided + if params.value is None: + from getpass import getpass + + params.value = getpass(f"Enter token value for '{params.name}': ") + + if not params.value: + return CommandResult(success=False, error="Token 
value is required") + + registry = manager.registry + + # Normalize token_type: CLI uses hyphens (api-key) but + # TokenType enum uses underscores (api_key) + normalized_type = params.token_type.replace("-", "_") + + if normalized_type == TokenType.BEARER.value: + bearer = BearerToken(token=params.value) + stored = bearer.to_stored_token(params.name) + stored.metadata = {"namespace": params.namespace} + store._store_raw( + f"{params.namespace}:{params.name}", json.dumps(stored.model_dump()) + ) + + reg_metadata = {} + if bearer.expires_at: + reg_metadata["expires_at"] = bearer.expires_at + + registry.register( + params.name, + TokenType.BEARER, + params.namespace, + metadata=reg_metadata, + ) + output.success(f"Bearer token '{params.name}' stored successfully") + + elif normalized_type == TokenType.API_KEY.value: + if not params.provider: + return CommandResult( + success=False, + error="Provider name is required for API keys. Use: --provider ", + ) + + api_key = APIKeyToken(provider=params.provider, key=params.value) + stored = api_key.to_stored_token(params.name) + stored.metadata = {"namespace": params.namespace} + store._store_raw( + f"{params.namespace}:{params.name}", json.dumps(stored.model_dump()) + ) + + registry.register( + params.name, + TokenType.API_KEY, + params.namespace, + metadata={"provider": params.provider}, + ) + output.success( + f"API key '{params.name}' for '{params.provider}' stored successfully" + ) + + elif normalized_type == TokenNamespace.GENERIC: + store.store_generic(params.name, params.value, params.namespace) + registry.register( + params.name, TokenType.BEARER, params.namespace, metadata={} + ) + output.success( + f"Token '{params.name}' stored in namespace '{params.namespace}'" + ) + + else: + return CommandResult( + success=False, + error=f"Unknown token type: {params.token_type}. 
Valid types: bearer, api-key, generic", + ) + + return CommandResult(success=True) + + except Exception as e: + return CommandResult(success=False, error=f"Error storing token: {e}") + + async def _action_get(self, kwargs: dict, args: list[str]) -> CommandResult: + """Get information about a stored token.""" + name = kwargs.get("name") + + # For chat mode, parse from args + if args and len(args) >= 2: + name = args[1] + + if not name: + return CommandResult(success=False, error="Token name is required") + + namespace = kwargs.get("namespace", "generic") + + try: + manager = _get_token_manager() + store = manager.token_store + + raw_data = store._retrieve_raw(f"{namespace}:{name}") + if not raw_data: + # Try OAuth namespace + raw_data = store._retrieve_raw(f"{OAUTH_NAMESPACE}:{name}") + if raw_data: + namespace = OAUTH_NAMESPACE + + if not raw_data: + output.warning(f"Token '{name}' not found") + return CommandResult(success=False) + + try: + from mcp_cli.auth import StoredToken + + stored = StoredToken.model_validate(json.loads(raw_data)) + info = stored.get_display_info() + + output.rule(f"[bold]Token: {name}[/bold]", style="primary") + output.info(f"Type: {stored.token_type.value}") + output.info(f"Namespace: {namespace}") + + for key, value in info.items(): + if key not in ["name", "type"]: + output.info(f"{key}: {value}") + + except Exception as e: + output.warning(f"Could not parse token data: {e}") + + return CommandResult(success=True) + + except Exception as e: + return CommandResult(success=False, error=f"Error retrieving token: {e}") + + async def _action_delete(self, kwargs: dict, args: list[str]) -> CommandResult: + """Delete a stored token.""" + name = kwargs.get("name") + + # For chat mode, parse from args + if args and len(args) >= 2: + name = args[1] + + if not name: + return CommandResult(success=False, error="Token name is required") + + params = TokenDeleteParams( + name=name, + namespace=kwargs.get("namespace"), + oauth=kwargs.get("is_oauth", 
False), + ) + + try: + manager = _get_token_manager() + store = manager.token_store + registry = manager.registry + + if params.oauth: + if manager.delete_tokens(params.name): + output.success(f"OAuth token for server '{params.name}' deleted") + else: + output.warning(f"OAuth token for server '{params.name}' not found") + return CommandResult(success=True) + + # Delete generic token + if params.namespace: + namespaces = [params.namespace] + else: + namespaces = [ + TokenNamespace.BEARER, + TokenNamespace.API_KEY, + TokenNamespace.PROVIDER, + TokenNamespace.GENERIC, + ] + + deleted = False + for ns in namespaces: + if store.delete_generic(params.name, ns): + registry.unregister(params.name, ns) + output.success( + f"Token '{params.name}' deleted from namespace '{ns}'" + ) + deleted = True + break + + if not deleted: + output.warning(f"Token '{params.name}' not found") + return CommandResult(success=False) + + return CommandResult(success=True) + + except Exception as e: + return CommandResult(success=False, error=f"Error deleting token: {e}") + + async def _action_clear(self, kwargs: dict, args: list[str]) -> CommandResult: + """Clear all stored tokens.""" + force = kwargs.get("force", False) or "--force" in args or "-f" in args + + params = TokenClearParams( + namespace=kwargs.get("namespace"), + force=force, + ) + + try: + manager = _get_token_manager() + store = manager.token_store + registry = manager.registry + + # Confirm before clearing + if not params.force: + if params.namespace: + msg = f"Clear all tokens in namespace '{params.namespace}'?" + else: + msg = "Clear ALL tokens from ALL namespaces?" 
+ + from chuk_term.ui.prompts import confirm + + if not confirm(msg): + output.warning("Cancelled") + return CommandResult(success=False) + + # Get tokens to clear from registry + tokens_to_clear = registry.list_tokens(namespace=params.namespace) + + if not tokens_to_clear: + output.warning("No tokens to clear") + return CommandResult(success=True) + + # Clear each token from storage + count = 0 + for entry in tokens_to_clear: + token_name = entry.get("name") + token_namespace = entry.get("namespace") + if ( + token_name + and token_namespace + and store.delete_generic(token_name, token_namespace) + ): + count += 1 + + # Clear from registry + if params.namespace: + registry.clear_namespace(params.namespace) + else: + registry.clear_all() + + if count > 0: + output.success(f"Cleared {count} token(s)") + else: + output.warning("No tokens to clear") + + return CommandResult(success=True) + + except Exception as e: + return CommandResult(success=False, error=f"Error clearing tokens: {e}") + + async def _action_backends(self) -> CommandResult: + """List available token storage backends.""" + import os + + try: + available = TokenStoreFactory.get_available_backends() + + backend_override = os.environ.get("MCP_CLI_TOKEN_BACKEND") + override_succeeded = False + if backend_override: + try: + detected = TokenStoreBackend(backend_override) + override_succeeded = True + except (ValueError, KeyError): + detected = TokenStoreFactory._detect_backend() + output.warning( + f"Invalid backend '{backend_override}', using auto-detected backend" + ) + else: + detected = TokenStoreFactory._detect_backend() + + output.rule("[bold]🔒 Token Storage Backends[/bold]", style="primary") + + all_backends = [ + ("keychain", "macOS Keychain"), + ("windows", "Windows Credential Manager"), + ("secretservice", "Linux Secret Service"), + ("vault", "HashiCorp Vault"), + ("encrypted", "Encrypted File Storage"), + ] + + table_data = [] + for backend_id, backend_name in all_backends: + backend = 
TokenStoreBackend(backend_id) + is_available = backend in available + is_detected = backend == detected + + status = [] + if is_detected: + status.append("🎯 Auto-detected") + if is_available: + status.append("✓") + + table_data.append( + { + "Backend": backend_name, + "Available": "✓" if is_available else "✗", + "Status": " ".join(status) if status else "-", + } + ) + + table = format_table( + table_data, title=None, columns=["Backend", "Available", "Status"] + ) + output.print_table(table) + output.print() + if override_succeeded: + output.info( + f"Current backend: {detected.value} (overridden via --token-backend)" + ) + else: + output.info(f"Current backend: {detected.value}") + + return CommandResult(success=True) + + except Exception as e: + return CommandResult(success=False, error=f"Error listing backends: {e}") + + async def _action_set_provider(self, kwargs: dict) -> CommandResult: + """Store a provider API key.""" + provider = kwargs.get("provider") + if not provider: + return CommandResult(success=False, error="Provider name is required") + + params = TokenProviderParams( + provider=provider, + api_key=kwargs.get("api_key"), + ) + + try: + from mcp_cli.auth.provider_tokens import ( + set_provider_token, + get_provider_env_var_name, + ) + import os + + manager = _get_token_manager() + + # Prompt for api_key if not provided + api_key = params.api_key + if api_key is None: + from getpass import getpass + + api_key = getpass(f"Enter API key for '{params.provider}': ") + + if not api_key: + return CommandResult(success=False, error="API key is required") + + # Store the token + if set_provider_token(params.provider, api_key, manager): + output.success(f"✅ Stored API key for provider '{params.provider}'") + + # Show hierarchy info + env_var = get_provider_env_var_name(params.provider) + output.print() + output.info("📋 Token Hierarchy:") + output.info(f" 1. Environment variable: {env_var} (highest priority)") + output.info(" 2. 
Secure storage: 🔐 (currently set)") + + # Check if env var is also set + if os.environ.get(env_var): + output.warning( + f"\n⚠️ Note: {env_var} is set in environment and will take precedence" + ) + return CommandResult(success=True) + else: + return CommandResult( + success=False, + error=f"Failed to store API key for provider '{params.provider}'", + ) + + except Exception as e: + return CommandResult( + success=False, error=f"Error storing provider token: {e}" + ) + + async def _action_get_provider(self, kwargs: dict) -> CommandResult: + """Get information about a provider's API key.""" + provider = kwargs.get("provider") + if not provider: + return CommandResult(success=False, error="Provider name is required") + + params = TokenProviderParams(provider=provider) + + try: + from mcp_cli.auth.provider_tokens import check_provider_token_status + + manager = _get_token_manager() + status = check_provider_token_status(params.provider, manager) + + output.rule( + f"[bold]Provider Token: {params.provider}[/bold]", style="primary" + ) + + if status["has_token"]: + output.success("✅ API key is configured") + output.info(f" Source: {status['source']}") + else: + output.warning("❌ No API key configured") + + output.print() + output.info("Token Status:") + output.info( + f" • Environment variable ({status['env_var']}): {'✅ set' if status['in_env'] else '❌ not set'}" + ) + output.info( + f" • Secure storage: {'✅ set' if status['in_storage'] else '❌ not set'}" + ) + + output.print() + output.tip("Hierarchy: Environment variables take precedence over storage") + + if not status["has_token"]: + output.print() + output.info("To set API key:") + output.info( + f" • Via storage: mcp-cli token set-provider {params.provider}" + ) + output.info(f" • Via environment: export {status['env_var']}=your-key") + + return CommandResult(success=True) + + except Exception as e: + return CommandResult( + success=False, error=f"Error retrieving provider token info: {e}" + ) + + async def 
_action_delete_provider(self, kwargs: dict) -> CommandResult: + """Delete a provider API key from secure storage.""" + provider = kwargs.get("provider") + if not provider: + return CommandResult(success=False, error="Provider name is required") + + try: + from mcp_cli.auth.provider_tokens import delete_provider_token + + manager = _get_token_manager() + + if delete_provider_token(provider, manager): + output.success(f"Deleted API key for provider '{provider}'") + return CommandResult(success=True) + else: + output.warning(f"No API key found for provider '{provider}'") + return CommandResult(success=False) + + except Exception as e: + return CommandResult( + success=False, error=f"Error deleting provider token: {e}" + ) diff --git a/src/mcp_cli/commands/tools/__init__.py b/src/mcp_cli/commands/tools/__init__.py new file mode 100644 index 00000000..47abd608 --- /dev/null +++ b/src/mcp_cli/commands/tools/__init__.py @@ -0,0 +1,11 @@ +"""Tool management commands.""" + +from mcp_cli.commands.tools.tools import ToolsCommand +from mcp_cli.commands.tools.execute_tool import ExecuteToolCommand +from mcp_cli.commands.tools.tool_history import ToolHistoryCommand + +__all__ = [ + "ToolsCommand", + "ExecuteToolCommand", + "ToolHistoryCommand", +] diff --git a/src/mcp_cli/commands/definitions/execute_tool.py b/src/mcp_cli/commands/tools/execute_tool.py similarity index 88% rename from src/mcp_cli/commands/definitions/execute_tool.py rename to src/mcp_cli/commands/tools/execute_tool.py index 44050d3b..8fa94f7b 100644 --- a/src/mcp_cli/commands/definitions/execute_tool.py +++ b/src/mcp_cli/commands/tools/execute_tool.py @@ -19,6 +19,53 @@ from chuk_term.ui import output +def _to_serializable(obj: Any) -> Any: + """Convert an object to a JSON-serializable form. + + Handles MCP SDK ToolResult, Pydantic models, and other non-serializable types. 
+ """ + # Handle None + if obj is None: + return None + + # Handle primitives + if isinstance(obj, (str, int, float, bool)): + return obj + + # Handle lists + if isinstance(obj, list): + return [_to_serializable(item) for item in obj] + + # Handle dicts + if isinstance(obj, dict): + return {k: _to_serializable(v) for k, v in obj.items()} + + # Handle Pydantic models (they have model_dump or dict method) + if hasattr(obj, "model_dump"): + return _to_serializable(obj.model_dump()) + if hasattr(obj, "dict"): + return _to_serializable(obj.dict()) + + # Handle MCP SDK ToolResult (has content attribute) + if hasattr(obj, "content"): + content = obj.content + # Content is typically a list of TextContent/ImageContent objects + if isinstance(content, list): + result_parts = [] + for item in content: + if hasattr(item, "text"): + result_parts.append(item.text) + elif hasattr(item, "model_dump"): + result_parts.append(_to_serializable(item.model_dump())) + else: + result_parts.append(str(item)) + return "\n".join(result_parts) if len(result_parts) == 1 else result_parts + return _to_serializable(content) + + # Fallback to string representation + return str(obj) + + class ExecuteToolCommand(UnifiedCommand): """Command to execute a tool with parameters.""" @@ -248,17 +295,23 @@ async def execute( # Build example example_params: dict[str, Any] = {} if "properties" in schema: + from mcp_cli.config import ( + JSON_TYPE_BOOLEAN, + JSON_TYPE_NUMBER, + JSON_TYPE_STRING, + ) + for prop_name, prop_info in schema["properties"].items(): if prop_name in schema.get("required", []): - prop_type = prop_info.get("type", "string") - if prop_type == "string": + prop_type = prop_info.get("type", JSON_TYPE_STRING) + if prop_type == JSON_TYPE_STRING: if prop_name == "message": example_params[prop_name] = "your message here" else: example_params[prop_name] = f"<{prop_name}>" - elif prop_type == "number": + elif prop_type == JSON_TYPE_NUMBER: example_params[prop_name] = 123 - elif prop_type == 
"boolean": + elif prop_type == JSON_TYPE_BOOLEAN: example_params[prop_name] = True output.success("✅ Use this format:") @@ -293,10 +346,12 @@ async def execute( if result.success and result.result is not None: output.success("✅ Tool executed successfully") # Extract the actual result from ToolCallResult - if isinstance(result.result, dict): - output.print(json.dumps(result.result, indent=2)) + # Use _to_serializable to handle MCP SDK types + serializable_result = _to_serializable(result.result) + if isinstance(serializable_result, (dict, list)): + output.print(json.dumps(serializable_result, indent=2)) else: - output.print(str(result.result)) + output.print(str(serializable_result)) elif result.error: # Handle error case cleanly without scary stack traces output.error("❌ Tool execution failed") @@ -334,7 +389,8 @@ async def execute( output.warning("Tool returned no result") elif isinstance(result, dict): # type: ignore[unreachable] output.success("✅ Tool executed successfully") - output.print(json.dumps(result, indent=2)) + serializable_result = _to_serializable(result) + output.print(json.dumps(serializable_result, indent=2)) else: output.success("✅ Tool executed successfully") output.print(str(result)) diff --git a/src/mcp_cli/commands/definitions/tool_history.py b/src/mcp_cli/commands/tools/tool_history.py similarity index 100% rename from src/mcp_cli/commands/definitions/tool_history.py rename to src/mcp_cli/commands/tools/tool_history.py diff --git a/src/mcp_cli/commands/definitions/tools.py b/src/mcp_cli/commands/tools/tools.py similarity index 100% rename from src/mcp_cli/commands/definitions/tools.py rename to src/mcp_cli/commands/tools/tools.py diff --git a/src/mcp_cli/config/__init__.py b/src/mcp_cli/config/__init__.py index dc0edfb9..ecb1e38d 100644 --- a/src/mcp_cli/config/__init__.py +++ b/src/mcp_cli/config/__init__.py @@ -1,36 +1,235 @@ """ Configuration management for MCP CLI. + +Clean, async-native, Pydantic-based configuration system. 
+All constants, enums, and configuration are consolidated here. """ -from mcp_cli.config.config_manager import ( - ServerConfig, +from importlib.metadata import PackageNotFoundError, version + +# Enums (all type-safe enums) +from mcp_cli.config.enums import ( + ConfigSource, + ConversationAction, + OutputFormat, + ServerAction, + ServerStatus, + ThemeAction, + TimeoutType, + TokenAction, + TokenBackend, + TokenNamespace, + ToolAction, +) + +# Environment variables +from mcp_cli.config.env_vars import ( + EnvVar, + get_env, + get_env_bool, + get_env_float, + get_env_int, + get_env_list, + is_set, + set_env, + unset_env, +) + +# Defaults and constants +from mcp_cli.config.defaults import ( + # Timeouts + DEFAULT_HTTP_CONNECT_TIMEOUT, + DEFAULT_HTTP_REQUEST_TIMEOUT, + DEFAULT_SERVER_INIT_TIMEOUT, + DEFAULT_STREAMING_CHUNK_TIMEOUT, + DEFAULT_STREAMING_FIRST_CHUNK_TIMEOUT, + DEFAULT_STREAMING_GLOBAL_TIMEOUT, + DEFAULT_TOOL_EXECUTION_TIMEOUT, + DISCOVERY_TIMEOUT, + REFRESH_TIMEOUT, + SHUTDOWN_TIMEOUT, + # Tool config + DEFAULT_CONFIRM_TOOLS, + DEFAULT_DYNAMIC_TOOLS_ENABLED, + DEFAULT_MAX_TOOL_CONCURRENCY, + # Conversation + DEFAULT_MAX_TURNS, + DEFAULT_SYSTEM_PROMPT, + # Provider/Model + DEFAULT_MODEL, + DEFAULT_PROVIDER, + # UI + DEFAULT_THEME, + DEFAULT_VERBOSE, + # Token/Auth + DEFAULT_TOKEN_BACKEND, + # Paths + DEFAULT_CONFIG_FILENAME, + # Application + APP_NAME, + GENERIC_NAMESPACE, + NAMESPACE, + OAUTH_NAMESPACE, + PROVIDER_NAMESPACE, + # Platforms + PLATFORM_DARWIN, + PLATFORM_LINUX, + PLATFORM_WINDOWS, + # Providers + PROVIDER_ANTHROPIC, + PROVIDER_DEEPSEEK, + PROVIDER_GROQ, + PROVIDER_OLLAMA, + PROVIDER_OPENAI, + PROVIDER_XAI, + SUPPORTED_PROVIDERS, + # JSON Schema + JSON_TYPE_ARRAY, + JSON_TYPE_BOOLEAN, + JSON_TYPE_INTEGER, + JSON_TYPE_NULL, + JSON_TYPE_NUMBER, + JSON_TYPE_OBJECT, + JSON_TYPE_STRING, + JSON_TYPES, +) + +# Config models +from mcp_cli.config.models import ( + ConfigOverride, MCPConfig, + TimeoutConfig, + ToolConfig, + TokenStorageConfig, + 
VaultConfig, +) +from mcp_cli.config.runtime import ResolvedValue, RuntimeConfig + +# Get version from package metadata +try: + APP_VERSION = version("mcp-cli") +except PackageNotFoundError: + APP_VERSION = "0.0.0-dev" + +# Legacy compatibility (during transition) +from mcp_cli.config.config_manager import ( ConfigManager, + ServerConfig, + detect_server_types, get_config, initialize_config, - detect_server_types, validate_server_config, ) from mcp_cli.config.discovery import ( + force_discovery_refresh, + get_available_models_quick, + get_discovery_status, setup_chuk_llm_environment, trigger_discovery_after_setup, - get_available_models_quick, validate_provider_exists, - get_discovery_status, - force_discovery_refresh, ) from mcp_cli.config.cli_options import ( - load_config, extract_server_names, - inject_logging_env_vars, - process_options, get_config_summary, + load_config, + process_options, +) +from mcp_cli.config.logging import ( + get_logger, + setup_logging, + setup_silent_mcp_environment, ) __all__ = [ - # Config Manager - "ServerConfig", + # Config models "MCPConfig", + "TimeoutConfig", + "ToolConfig", + "TokenStorageConfig", + "VaultConfig", + "RuntimeConfig", + "ConfigOverride", + "ResolvedValue", + # Enums + "ConfigSource", + "ConversationAction", + "OutputFormat", + "ServerAction", + "ServerStatus", + "ThemeAction", + "TimeoutType", + "TokenAction", + "TokenBackend", + "TokenNamespace", + "ToolAction", + # Environment variables + "EnvVar", + "get_env", + "get_env_bool", + "get_env_float", + "get_env_int", + "get_env_list", + "is_set", + "set_env", + "unset_env", + # Timeouts + "DEFAULT_HTTP_CONNECT_TIMEOUT", + "DEFAULT_HTTP_REQUEST_TIMEOUT", + "DEFAULT_SERVER_INIT_TIMEOUT", + "DEFAULT_STREAMING_CHUNK_TIMEOUT", + "DEFAULT_STREAMING_FIRST_CHUNK_TIMEOUT", + "DEFAULT_STREAMING_GLOBAL_TIMEOUT", + "DEFAULT_TOOL_EXECUTION_TIMEOUT", + "DISCOVERY_TIMEOUT", + "REFRESH_TIMEOUT", + "SHUTDOWN_TIMEOUT", + # Tool config defaults + "DEFAULT_CONFIRM_TOOLS", + 
"DEFAULT_DYNAMIC_TOOLS_ENABLED", + "DEFAULT_MAX_TOOL_CONCURRENCY", + # Conversation defaults + "DEFAULT_MAX_TURNS", + "DEFAULT_SYSTEM_PROMPT", + # Provider/Model defaults + "DEFAULT_MODEL", + "DEFAULT_PROVIDER", + # UI defaults + "DEFAULT_THEME", + "DEFAULT_VERBOSE", + # Token/Auth defaults + "DEFAULT_TOKEN_BACKEND", + # Path defaults + "DEFAULT_CONFIG_FILENAME", + # Application constants + "APP_NAME", + "APP_VERSION", + "GENERIC_NAMESPACE", + "NAMESPACE", + "OAUTH_NAMESPACE", + "PROVIDER_NAMESPACE", + # Platform constants + "PLATFORM_DARWIN", + "PLATFORM_LINUX", + "PLATFORM_WINDOWS", + # Provider constants + "PROVIDER_ANTHROPIC", + "PROVIDER_DEEPSEEK", + "PROVIDER_GROQ", + "PROVIDER_OLLAMA", + "PROVIDER_OPENAI", + "PROVIDER_XAI", + "SUPPORTED_PROVIDERS", + # JSON Schema constants + "JSON_TYPE_ARRAY", + "JSON_TYPE_BOOLEAN", + "JSON_TYPE_INTEGER", + "JSON_TYPE_NULL", + "JSON_TYPE_NUMBER", + "JSON_TYPE_OBJECT", + "JSON_TYPE_STRING", + "JSON_TYPES", + # Legacy (will be removed) + "ServerConfig", "ConfigManager", "get_config", "initialize_config", @@ -46,7 +245,54 @@ # CLI Options "load_config", "extract_server_names", - "inject_logging_env_vars", "process_options", "get_config_summary", + # Logging + "get_logger", + "setup_logging", + "setup_silent_mcp_environment", ] + + +# Convenience function for loading config +def load_runtime_config( + config_path: str | None = None, + cli_overrides: ConfigOverride | None = None, +) -> RuntimeConfig: + """Load runtime configuration with clean API. 
+ + Args: + config_path: Path to config file (default: server_config.json) + cli_overrides: CLI argument overrides + + Returns: + RuntimeConfig instance ready to use + """ + from pathlib import Path + + path = Path(config_path or "server_config.json") + file_config = MCPConfig.load_sync(path) + return RuntimeConfig(file_config, cli_overrides) + + +async def load_runtime_config_async( + config_path: str | None = None, + cli_overrides: ConfigOverride | None = None, +) -> RuntimeConfig: + """Async load runtime configuration. + + Args: + config_path: Path to config file (default: server_config.json) + cli_overrides: CLI argument overrides + + Returns: + RuntimeConfig instance ready to use + """ + from pathlib import Path + + path = Path(config_path or "server_config.json") + file_config = await MCPConfig.load_async(path) + return RuntimeConfig(file_config, cli_overrides) + + +__all__.extend(["load_runtime_config", "load_runtime_config_async"]) diff --git a/src/mcp_cli/config/cli_options.py b/src/mcp_cli/config/cli_options.py index 45b47dfd..491bb3c8 100644 --- a/src/mcp_cli/config/cli_options.py +++ b/src/mcp_cli/config/cli_options.py @@ -45,8 +45,8 @@ def load_config(config_file: str) -> MCPConfig | None: # Try to parse as JSON to verify it's valid import json - config_path.read_text() - json.loads(config_path.read_text()) + content = config_path.read_text() + json.loads(content) except json.JSONDecodeError: # Invalid JSON - return None return None @@ -103,28 +103,6 @@ def extract_server_names( return {i: name for i, name in enumerate(enabled_servers)} -def inject_logging_env_vars(cfg: MCPConfig, quiet: bool = False) -> None: - """Inject logging environment variables into MCP server configs (modifies in place).""" - if not cfg or not cfg.servers: - return - - log_level = "ERROR" if quiet else "WARNING" - logging_env_vars = { - "PYTHONWARNINGS": "ignore", - "LOG_LEVEL": log_level, - "CHUK_LOG_LEVEL": log_level, - "MCP_LOG_LEVEL": log_level, - } - - for server_name, 
server_config in cfg.servers.items(): - # Only inject env vars for STDIO servers (those with 'command') - if server_config.command: - # Inject logging env vars if not already set - for env_key, env_value in logging_env_vars.items(): - if env_key not in server_config.env: - server_config.env[env_key] = env_value - - def process_options( server: str | None, disable_filesystem: bool, @@ -212,21 +190,9 @@ def process_options( logger.error(f" - {error}") # Continue anyway but warn user - # STEP 7: Handle MCP server logging - if cfg: - inject_logging_env_vars(cfg, quiet=quiet) - - # Save modified config for MCP tool manager - temp_config_path = ( - Path(config_file).parent / f"_modified_{Path(config_file).name}" - ) - try: - cfg.save_to_file(temp_config_path) - os.environ["MCP_CLI_MODIFIED_CONFIG"] = str(temp_config_path) - except Exception as e: - logger.warning(f"Failed to create modified config: {e}") - - # STEP 8: Build server list and extract server names + # STEP 7: Build server list and extract server names + # Note: Removed inject_logging_env_vars - env vars should be set at runtime, + # not injected into config files server_names = extract_server_names(cfg, user_specified) # STEP 9: Log server type detection for debugging diff --git a/src/mcp_cli/config/config_manager.py b/src/mcp_cli/config/config_manager.py index 90369a14..03534c01 100644 --- a/src/mcp_cli/config/config_manager.py +++ b/src/mcp_cli/config/config_manager.py @@ -3,6 +3,9 @@ This module provides a centralized way to manage configuration instead of loading JSON files all over the place. + +LEGACY: This module contains legacy MCPConfig that will be phased out. +Use mcp_cli.config.models.MCPConfig for new code. 
""" from __future__ import annotations @@ -16,10 +19,22 @@ from pydantic import BaseModel, Field from mcp_cli.auth import OAuthConfig +from mcp_cli.config.defaults import DEFAULT_PROVIDER, DEFAULT_MODEL from mcp_cli.tools.models import ServerInfo, TransportType +# Import clean models from new config system +from mcp_cli.config.models import ( + TimeoutConfig as CleanTimeoutConfig, + ToolConfig as CleanToolConfig, +) + logger = logging.getLogger(__name__) +# LEGACY: Use clean models for new code +# These are kept for backward compatibility with old code +TimeoutConfig = CleanTimeoutConfig +ToolConfig = CleanToolConfig + class ServerConfig(BaseModel): """Configuration for a single MCP server.""" @@ -48,36 +63,47 @@ def transport(self) -> TransportType: @classmethod def from_dict(cls, name: str, data: dict[str, Any]) -> "ServerConfig": - """Create from dictionary format with environment variable handling.""" - # Get env from config - env = data.get("env", {}).copy() + """Create from dictionary format with environment variable handling. + + Uses ServerConfigInput Pydantic model for validation instead of manual .get() calls. 
+ """ + from mcp_cli.config.server_models import ServerConfigInput - # Ensure PATH is inherited from current environment if not explicitly set + # Parse using Pydantic model (validates and provides defaults) + input_model = ServerConfigInput.model_validate(data) + + # Get env and ensure PATH is inherited + env = input_model.env.copy() if "PATH" not in env: env["PATH"] = os.environ.get("PATH", "") # Parse OAuth config if present oauth = None - if "oauth" in data: - oauth = OAuthConfig.model_validate(data["oauth"]) + if input_model.oauth: + if isinstance(input_model.oauth, dict): + oauth = OAuthConfig.model_validate(input_model.oauth) + else: + oauth = input_model.oauth return cls( name=name, - command=data.get("command"), - args=data.get("args", []), + command=input_model.command, + args=input_model.args, env=env, - url=data.get("url"), - headers=data.get("headers"), + url=input_model.url, + headers=input_model.headers, oauth=oauth, - disabled=data.get("disabled", False), + disabled=input_model.disabled, ) def to_server_info(self, server_id: int = 0) -> ServerInfo: """Convert to ServerInfo model.""" + from mcp_cli.config.enums import ServerStatus + return ServerInfo( id=server_id, name=self.name, - status="configured", + status=ServerStatus.CONFIGURED.value, tool_count=0, namespace=self.name, enabled=not self.disabled, @@ -90,15 +116,28 @@ def to_server_info(self, server_id: int = 0) -> ServerInfo: ) -class MCPConfig(BaseModel): - """Complete MCP configuration.""" +class LegacyMCPConfig(BaseModel): + """LEGACY: Complete MCP configuration with ServerConfig models. + + This class is kept for backward compatibility with code that uses + ServerConfig models. New code should use CleanMCPConfig from models.py + which stores servers as plain dicts. 
+ """ servers: dict[str, ServerConfig] = Field(default_factory=dict) - default_provider: str = "openai" - default_model: str = "gpt-4" + default_provider: str = DEFAULT_PROVIDER + default_model: str = DEFAULT_MODEL theme: str = "default" verbose: bool = True - confirm_tools: bool = True + confirm_tools: bool = True # DEPRECATED: Use tools.confirm_tools instead + + # Centralized configurations (use clean models) + timeouts: TimeoutConfig = Field( + default_factory=TimeoutConfig, description="All timeout configurations" + ) + tools: ToolConfig = Field( + default_factory=ToolConfig, description="Tool filtering and behavior" + ) # Token storage configuration token_store_backend: str = ( @@ -114,7 +153,7 @@ class MCPConfig(BaseModel): model_config = {"frozen": False, "arbitrary_types_allowed": True} @classmethod - def load_from_file(cls, config_path: Path) -> MCPConfig: + def load_from_file(cls, config_path: Path) -> "LegacyMCPConfig": """Load configuration from JSON file.""" config = cls() @@ -132,8 +171,8 @@ def load_from_file(cls, config_path: Path) -> MCPConfig: config.servers[name] = ServerConfig.from_dict(name, server_data) # Load other settings - config.default_provider = data.get("defaultProvider", "openai") - config.default_model = data.get("defaultModel", "gpt-4") + config.default_provider = data.get("defaultProvider", DEFAULT_PROVIDER) + config.default_model = data.get("defaultModel", DEFAULT_MODEL) config.theme = data.get("theme", "default") config.verbose = data.get("verbose", True) config.confirm_tools = data.get("confirmTools", True) @@ -150,6 +189,53 @@ def load_from_file(cls, config_path: Path) -> MCPConfig: ) config.vault_namespace = token_storage.get("vaultNamespace") + # Load timeout configuration (field names match clean TimeoutConfig) + if "timeouts" in data: + timeout_data = data["timeouts"] + config.timeouts = TimeoutConfig( + streaming_chunk=timeout_data.get( + "streamingChunkTimeout", + config.timeouts.streaming_chunk, + ), + 
streaming_global=timeout_data.get( + "streamingGlobalTimeout", + config.timeouts.streaming_global, + ), + streaming_first_chunk=timeout_data.get( + "streamingFirstChunkTimeout", + config.timeouts.streaming_first_chunk, + ), + tool_execution=timeout_data.get( + "toolExecutionTimeout", config.timeouts.tool_execution + ), + server_init=timeout_data.get( + "serverInitTimeout", config.timeouts.server_init + ), + http_request=timeout_data.get( + "httpRequestTimeout", config.timeouts.http_request + ), + http_connect=timeout_data.get( + "httpConnectTimeout", config.timeouts.http_connect + ), + ) + + # NEW: Load tool configuration + if "tools" in data: + tool_data = data["tools"] + config.tools = ToolConfig( + include_tools=tool_data.get("includeTools"), + exclude_tools=tool_data.get("excludeTools"), + dynamic_tools_enabled=tool_data.get( + "dynamicToolsEnabled", config.tools.dynamic_tools_enabled + ), + confirm_tools=tool_data.get( + "confirmTools", config.tools.confirm_tools + ), + max_concurrency=tool_data.get( + "maxConcurrency", config.tools.max_concurrency + ), + ) + except Exception as e: # Log error but return empty config print(f"Error loading config: {e}") @@ -190,6 +276,26 @@ def save_to_file(self, config_path: Path) -> None: if token_storage: data["tokenStorage"] = token_storage + # NEW: Add timeout configuration + data["timeouts"] = { + "streamingChunkTimeout": self.timeouts.streaming_chunk, + "streamingGlobalTimeout": self.timeouts.streaming_global, + "streamingFirstChunkTimeout": self.timeouts.streaming_first_chunk, + "toolExecutionTimeout": self.timeouts.tool_execution, + "serverInitTimeout": self.timeouts.server_init, + "httpRequestTimeout": self.timeouts.http_request, + "httpConnectTimeout": self.timeouts.http_connect, + } + + # NEW: Add tool configuration + data["tools"] = { + "includeTools": self.tools.include_tools, + "excludeTools": self.tools.exclude_tools, + "dynamicToolsEnabled": self.tools.dynamic_tools_enabled, + "confirmTools": 
self.tools.confirm_tools, + "maxConcurrency": self.tools.max_concurrency, + } + with open(config_path, "w") as f: json.dump(data, f, indent=2) @@ -217,6 +323,11 @@ def list_enabled_servers(self) -> list[ServerConfig]: return [s for s in self.servers.values() if not s.disabled] +# Export alias for backward compatibility +# ConfigManager still uses LegacyMCPConfig internally +MCPConfig = LegacyMCPConfig + + class ConfigManager: """ Manager for application configuration. @@ -257,22 +368,12 @@ def initialize(self, config_path: Path | None = None) -> MCPConfig: import importlib.resources as resources try: - # Python 3.9+ - if hasattr(resources, "files"): - package_files = resources.files("mcp_cli") - config_file = package_files / "server_config.json" - if config_file.is_file(): - self._config_path = Path(str(config_file)) - else: - # Package config doesn't exist, use cwd path anyway - self._config_path = cwd_config + package_files = resources.files("mcp_cli") + config_file = package_files / "server_config.json" + if config_file.is_file(): + self._config_path = Path(str(config_file)) else: - # Python 3.8 fallback - with resources.path("mcp_cli", "server_config.json") as p: - if p.exists(): - self._config_path = p - else: - self._config_path = cwd_config + self._config_path = cwd_config except (ImportError, FileNotFoundError, AttributeError, TypeError): # If package config doesn't exist or can't be accessed, use cwd self._config_path = cwd_config @@ -365,11 +466,21 @@ def detect_server_types( stdio_servers.append(server) continue - if server_config.url: + # Handle both dict (new clean config) and ServerConfig (legacy) + sc: Any = server_config + if isinstance(sc, dict): + url = sc.get("url") + command = sc.get("command") + else: + # ServerConfig model + url = sc.url + command = sc.command + + if url: # HTTP server - http_servers.append({"name": server, "url": server_config.url}) - logger.debug(f"Detected HTTP server: {server} -> {server_config.url}") - elif 
server_config.command: + http_servers.append({"name": server, "url": url}) + logger.debug(f"Detected HTTP server: {server} -> {url}") + elif command: # STDIO server stdio_servers.append(server) logger.debug(f"Detected STDIO server: {server}") @@ -408,10 +519,21 @@ def validate_server_config( server_config = cfg.servers[server] - # Check for valid configuration - has_url = server_config.url is not None - has_command = server_config.command is not None + # Handle both dict (new clean config) and ServerConfig (legacy) + sc: Any = server_config + if isinstance(sc, dict): + has_url = sc.get("url") is not None + has_command = sc.get("command") is not None + url = sc.get("url") + command = sc.get("command") + else: + # ServerConfig model + has_url = server_config.url is not None + has_command = server_config.command is not None + url = server_config.url + command = server_config.command + # Check for valid configuration if not has_url and not has_command: errors.append(f"Server '{server}' missing both 'url' and 'command' fields") elif has_url and has_command: @@ -420,15 +542,182 @@ def validate_server_config( ) elif has_url: # Validate URL format - url = server_config.url if url and not url.startswith(("http://", "https://")): errors.append( f"Server '{server}' URL must start with http:// or https://" ) elif has_command: # Validate command format - command = server_config.command if not isinstance(command, str) or not command.strip(): errors.append(f"Server '{server}' command must be a non-empty string") return len(errors) == 0, errors + + +# ============================================================================ +# Runtime Configuration Resolver +# ============================================================================ + + +class RuntimeConfig: + """ + Runtime configuration resolver with priority handling. + + Priority order (highest to lowest): + 1. CLI arguments (passed at initialization) + 2. Environment variables + 3. Config file (MCPConfig) + 4. 
Defaults (in TimeoutConfig/ToolConfig) + + This class provides a unified interface for accessing configuration + values, automatically resolving from the appropriate source. + """ + + def __init__( + self, + mcp_config: MCPConfig | None = None, + cli_overrides: dict[str, Any] | None = None, + ): + """ + Initialize runtime config resolver. + + Args: + mcp_config: The loaded MCPConfig (from file) + cli_overrides: Dictionary of CLI argument overrides + """ + self.mcp_config = mcp_config or MCPConfig() + self.cli_overrides = cli_overrides or {} + + def get_timeout(self, timeout_name: str) -> float: + """ + Get timeout value with priority resolution. + + Args: + timeout_name: Name of timeout (e.g., "streaming_chunk", "tool_execution") + + Returns: + Resolved timeout value in seconds + + Example: + >>> config = RuntimeConfig(mcp_config) + >>> config.get_timeout("streaming_chunk") # Returns 45.0 by default + """ + # 1. Check CLI overrides first + cli_key = f"{timeout_name}_timeout" + if cli_key in self.cli_overrides: + return float(self.cli_overrides[cli_key]) + + # 2. Check environment variables + env_key = f"MCP_{timeout_name.upper()}_TIMEOUT" + env_value = os.getenv(env_key) + if env_value: + try: + return float(env_value) + except ValueError: + logger.warning( + f"Invalid timeout value in {env_key}={env_value}, using config/default" + ) + + # Special case: MCP_TOOL_TIMEOUT applies to multiple timeouts + if timeout_name in ["streaming_chunk", "streaming_global", "tool_execution"]: + tool_timeout_env = os.getenv("MCP_TOOL_TIMEOUT") + if tool_timeout_env: + try: + return float(tool_timeout_env) + except ValueError: + pass + + # 3. Get from config file + timeout_attr = f"{timeout_name}_timeout" + if hasattr(self.mcp_config.timeouts, timeout_attr): + return getattr(self.mcp_config.timeouts, timeout_attr) # type: ignore[no-any-return] + + # 4. 
Fallback to default (should never reach here if TimeoutConfig has defaults) + logger.warning( + f"No timeout configuration found for '{timeout_name}', using 120.0" + ) + return 120.0 + + def get_tool_config_value(self, key: str) -> Any: + """ + Get tool configuration value with priority resolution. + + Args: + key: Configuration key (e.g., "include_tools", "confirm_tools") + + Returns: + Resolved configuration value + """ + # 1. Check CLI overrides + if key in self.cli_overrides: + return self.cli_overrides[key] + + # 2. Check environment variables + env_key = f"MCP_CLI_{key.upper()}" + env_value = os.getenv(env_key) + + if env_value is not None: + # Handle different types + if key in ["include_tools", "exclude_tools"]: + # Comma-separated list + return [t.strip() for t in env_value.split(",") if t.strip()] + elif key == "dynamic_tools_enabled": + return env_value.lower() in ["1", "true", "yes"] + elif key == "confirm_tools": + return env_value.lower() not in ["0", "false", "no"] + elif key == "max_concurrency": + try: + return int(env_value) + except ValueError: + pass + + # 3. Get from config file + if hasattr(self.mcp_config.tools, key): + return getattr(self.mcp_config.tools, key) + + # 4. Return None if not found + return None + + def get_all_timeouts(self) -> dict[str, float]: + """Get all timeout values as a dictionary.""" + return { + "streaming_chunk": self.get_timeout("streaming_chunk"), + "streaming_global": self.get_timeout("streaming_global"), + "streaming_first_chunk": self.get_timeout("streaming_first_chunk"), + "tool_execution": self.get_timeout("tool_execution"), + "server_init": self.get_timeout("server_init"), + "http_request": self.get_timeout("http_request"), + "http_connect": self.get_timeout("http_connect"), + } + + def update_from_cli(self, **kwargs) -> None: + """ + Update CLI overrides from keyword arguments. 
+ + Args: + **kwargs: CLI argument values to override + """ + self.cli_overrides.update(kwargs) + + +def get_runtime_config( + mcp_config: MCPConfig | None = None, cli_overrides: dict[str, Any] | None = None +) -> RuntimeConfig: + """ + Convenience function to create a RuntimeConfig instance. + + Args: + mcp_config: The loaded MCPConfig (defaults to getting from ConfigManager) + cli_overrides: Dictionary of CLI argument overrides + + Returns: + RuntimeConfig instance + """ + if mcp_config is None: + try: + mcp_config = get_config() + except RuntimeError: + # Config not initialized, use defaults + mcp_config = MCPConfig() + + return RuntimeConfig(mcp_config, cli_overrides) diff --git a/src/mcp_cli/config/defaults.py b/src/mcp_cli/config/defaults.py new file mode 100644 index 00000000..73fb7a59 --- /dev/null +++ b/src/mcp_cli/config/defaults.py @@ -0,0 +1,199 @@ +"""Default configuration values - no more magic numbers! + +All default values should be defined here, not hardcoded in the code. 
+""" + +from __future__ import annotations + + +# ================================================================ +# Timeout Defaults (in seconds) +# ================================================================ + +# Streaming timeouts +DEFAULT_STREAMING_CHUNK_TIMEOUT = 45.0 +"""Default timeout for receiving each streaming chunk.""" + +DEFAULT_STREAMING_GLOBAL_TIMEOUT = 300.0 +"""Default total streaming timeout.""" + +DEFAULT_STREAMING_FIRST_CHUNK_TIMEOUT = 60.0 +"""Default timeout for first chunk (may need longer for complex queries).""" + +# Tool timeouts +DEFAULT_TOOL_EXECUTION_TIMEOUT = 120.0 +"""Default timeout for tool execution.""" + +# Server timeouts +DEFAULT_SERVER_INIT_TIMEOUT = 120.0 +"""Default timeout for server initialization.""" + +# HTTP timeouts +DEFAULT_HTTP_REQUEST_TIMEOUT = 30.0 +"""Default timeout for HTTP requests.""" + +DEFAULT_HTTP_CONNECT_TIMEOUT = 10.0 +"""Default timeout for HTTP connections.""" + +# Discovery/UI timeouts (moved from constants/timeouts.py) +DISCOVERY_TIMEOUT = 10.0 +"""Provider discovery HTTP timeout.""" + +REFRESH_TIMEOUT = 1.0 +"""Display refresh timeout.""" + +SHUTDOWN_TIMEOUT = 0.5 +"""Graceful shutdown timeout.""" + + +# ================================================================ +# Tool Configuration Defaults +# ================================================================ + +DEFAULT_MAX_TOOL_CONCURRENCY = 5 +"""Default maximum concurrent tool executions.""" + +DEFAULT_CONFIRM_TOOLS = True +"""Default: require confirmation before executing tools.""" + +DEFAULT_DYNAMIC_TOOLS_ENABLED = False +"""Default: dynamic tool discovery disabled.""" + + +# ================================================================ +# Conversation Defaults +# ================================================================ + +DEFAULT_MAX_TURNS = 100 +"""Default maximum conversation turns before exit.""" + +DEFAULT_SYSTEM_PROMPT = "You are a helpful AI assistant with access to tools." 
+"""Default system prompt.""" + + +# ================================================================ +# Provider/Model Defaults +# ================================================================ + +DEFAULT_PROVIDER = "openai" +"""Default LLM provider.""" + +DEFAULT_MODEL = "gpt-4o-mini" +"""Default LLM model.""" + + +# ================================================================ +# UI Defaults +# ================================================================ + +DEFAULT_THEME = "default" +"""Default UI theme (from chuk-term).""" + +DEFAULT_VERBOSE = True +"""Default verbosity level.""" + + +# ================================================================ +# Token/Auth Defaults +# ================================================================ + +DEFAULT_TOKEN_BACKEND = "auto" +"""Default token storage backend.""" + + +# ================================================================ +# Path Defaults +# ================================================================ + +DEFAULT_CONFIG_FILENAME = "server_config.json" +"""Default configuration filename.""" + + +# ================================================================ +# Application Constants +# ================================================================ + +NAMESPACE = "mcp-cli" +"""Application namespace.""" + +OAUTH_NAMESPACE = NAMESPACE +"""OAuth namespace (same as app namespace).""" + +PROVIDER_NAMESPACE = "provider" +"""Provider namespace for token storage.""" + +GENERIC_NAMESPACE = "generic" +"""Generic namespace.""" + +APP_NAME = "mcp-cli" +"""Application name.""" + + +# ================================================================ +# Platform Constants +# ================================================================ + +PLATFORM_WINDOWS = "win32" +"""Windows platform identifier (from sys.platform).""" + +PLATFORM_DARWIN = "darwin" +"""macOS platform identifier (from sys.platform).""" + +PLATFORM_LINUX = "linux" +"""Linux platform identifier (from sys.platform).""" + + +# 
================================================================ +# Provider Constants +# ================================================================ + +PROVIDER_OLLAMA = "ollama" +PROVIDER_OPENAI = "openai" +PROVIDER_ANTHROPIC = "anthropic" +PROVIDER_GROQ = "groq" +PROVIDER_DEEPSEEK = "deepseek" +PROVIDER_XAI = "xai" + +SUPPORTED_PROVIDERS = [ + PROVIDER_OLLAMA, + PROVIDER_OPENAI, + PROVIDER_ANTHROPIC, + PROVIDER_GROQ, + PROVIDER_DEEPSEEK, + PROVIDER_XAI, +] +"""List of supported LLM providers.""" + + +# ================================================================ +# JSON Schema Type Constants +# ================================================================ + +JSON_TYPE_STRING = "string" +JSON_TYPE_NUMBER = "number" +JSON_TYPE_INTEGER = "integer" +JSON_TYPE_BOOLEAN = "boolean" +JSON_TYPE_ARRAY = "array" +JSON_TYPE_OBJECT = "object" +JSON_TYPE_NULL = "null" + +JSON_TYPES = [ + JSON_TYPE_STRING, + JSON_TYPE_NUMBER, + JSON_TYPE_INTEGER, + JSON_TYPE_BOOLEAN, + JSON_TYPE_ARRAY, + JSON_TYPE_OBJECT, + JSON_TYPE_NULL, +] +"""All valid JSON Schema types.""" + + +# ================================================================ +# Middleware Configuration +# ================================================================ +# Middleware (retry, circuit breaker, rate limiting) is provided by +# chuk-tool-processor. See chuk_tool_processor.mcp.MiddlewareConfig +# for configuration options. +DEFAULT_MIDDLEWARE_ENABLED = True +"""Enable CTP middleware by default.""" diff --git a/src/mcp_cli/config/discovery.py b/src/mcp_cli/config/discovery.py index 2c58ecf4..4195a0e0 100644 --- a/src/mcp_cli/config/discovery.py +++ b/src/mcp_cli/config/discovery.py @@ -2,6 +2,7 @@ ChukLLM discovery and provider management. This module handles the discovery and validation of ChukLLM providers and models. +Uses singleton pattern instead of module-level globals for cleaner state management. 
""" from __future__ import annotations @@ -12,87 +13,175 @@ logger = logging.getLogger(__name__) -# Global flags to ensure we only set up once -_ENV_SETUP_COMPLETE = False -_DISCOVERY_TRIGGERED = False +class DiscoveryManager: + """Singleton manager for ChukLLM discovery state. -def setup_chuk_llm_environment() -> None: - """ - Set up environment variables for ChukLLM discovery. - MUST be called before any chuk_llm imports. + Replaces module-level globals with a proper class-based singleton. + Thread-safe via Python's GIL for simple flag operations. """ - global _ENV_SETUP_COMPLETE - if _ENV_SETUP_COMPLETE: - return + _instance: "DiscoveryManager | None" = None - # Set environment variables (only if not already set by user) - env_vars = { - "CHUK_LLM_DISCOVERY_ENABLED": "true", - "CHUK_LLM_AUTO_DISCOVER": "true", - "CHUK_LLM_DISCOVERY_ON_STARTUP": "true", - "CHUK_LLM_DISCOVERY_TIMEOUT": "10", - "CHUK_LLM_OLLAMA_DISCOVERY": "true", - "CHUK_LLM_OPENAI_DISCOVERY": "true", - "CHUK_LLM_OPENAI_TOOL_COMPATIBILITY": "true", - "CHUK_LLM_UNIVERSAL_TOOLS": "true", - } + # Instance attributes with type annotations + _env_setup_complete: bool + _discovery_triggered: bool - for key, value in env_vars.items(): - if key not in os.environ: - os.environ[key] = value + def __new__(cls) -> "DiscoveryManager": + if cls._instance is None: + cls._instance = super().__new__(cls) + cls._instance._env_setup_complete = False + cls._instance._discovery_triggered = False + return cls._instance - _ENV_SETUP_COMPLETE = True - logger.debug("ChukLLM environment variables set") + @property + def env_setup_complete(self) -> bool: + """Whether environment setup has been completed.""" + return self._env_setup_complete + @property + def discovery_triggered(self) -> bool: + """Whether discovery has been triggered.""" + return self._discovery_triggered -def trigger_discovery_after_setup() -> int: - """ - Trigger discovery after environment setup. 
- Call this after setup_chuk_llm_environment() and before using models. + def setup_environment(self) -> None: + """Set up environment variables for ChukLLM discovery. - Returns: - Number of new functions discovered - """ - global _DISCOVERY_TRIGGERED + MUST be called before any chuk_llm imports. + """ + if self._env_setup_complete: + return - if _DISCOVERY_TRIGGERED: - return 0 + # Set environment variables (only if not already set by user) + env_vars = { + "CHUK_LLM_DISCOVERY_ENABLED": "true", + "CHUK_LLM_AUTO_DISCOVER": "true", + "CHUK_LLM_DISCOVERY_ON_STARTUP": "true", + "CHUK_LLM_DISCOVERY_TIMEOUT": "10", + "CHUK_LLM_OLLAMA_DISCOVERY": "true", + "CHUK_LLM_OPENAI_DISCOVERY": "true", + "CHUK_LLM_OPENAI_TOOL_COMPATIBILITY": "true", + "CHUK_LLM_UNIVERSAL_TOOLS": "true", + } - try: - # Import discovery functions - from chuk_llm.api.providers import trigger_ollama_discovery_and_refresh + for key, value in env_vars.items(): + if key not in os.environ: + os.environ[key] = value - logger.debug("Triggering Ollama discovery from cli_options...") + self._env_setup_complete = True + logger.debug("ChukLLM environment variables set") - # Trigger Ollama discovery to get all available models - new_functions = trigger_ollama_discovery_and_refresh() + def trigger_discovery(self, provider: str | None = None) -> int: + """Trigger discovery after environment setup. - _DISCOVERY_TRIGGERED = True + Call this after setup_environment() and before using models. 
- if new_functions: - logger.debug(f"CLI discovery: {len(new_functions)} new Ollama functions") - else: - logger.debug("CLI discovery: no new functions (may already be cached)") + Args: + provider: Specific provider to discover (None for all configured providers) - return len(new_functions) + Returns: + Number of new functions discovered + """ + if self._discovery_triggered and provider is None: + return 0 - except Exception as e: - logger.debug(f"CLI discovery failed: {e}") - return 0 + try: + from chuk_llm.api.providers import refresh_provider_functions + + logger.debug(f"Triggering discovery for provider: {provider or 'all'}...") + + # Trigger discovery for specified provider (or all if None) + new_functions = refresh_provider_functions(provider) + + if provider is None: + self._discovery_triggered = True + + if new_functions: + logger.debug(f"CLI discovery: {len(new_functions)} new functions") + else: + logger.debug("CLI discovery: no new functions (may already be cached)") + + return len(new_functions) + + except Exception as e: + logger.debug(f"CLI discovery failed: {e}") + return 0 + + def force_refresh(self) -> int: + """Force a fresh discovery (useful for debugging). + + Returns: + Number of new functions discovered + """ + from mcp_cli.config.env_vars import EnvVar, set_env + + self._discovery_triggered = False + + # Set force refresh environment variable (using constant) + set_env(EnvVar.CHUK_LLM_DISCOVERY_FORCE_REFRESH, "true") + + # Trigger discovery again + return self.trigger_discovery() + + def get_status(self) -> dict[str, Any]: + """Get discovery status for debugging. 
+ + Returns: + Dictionary with discovery status information + """ + from mcp_cli.config.discovery_models import DiscoveryConfig, DiscoveryStatus + + status = DiscoveryStatus( + env_setup_complete=self._env_setup_complete, + discovery_triggered=self._discovery_triggered, + config=DiscoveryConfig.from_env(), + ) + return status.to_dict() -def get_available_models_quick(provider: str = "ollama") -> list[str]: +# Singleton instance - use this for access +_discovery_manager = DiscoveryManager() + + +def get_discovery_manager() -> DiscoveryManager: + """Get the singleton DiscoveryManager instance.""" + return _discovery_manager + + +# ────────────────────────────────────────────────────────────────────────────── +# Backward-compatible module-level functions (delegate to singleton) +# ────────────────────────────────────────────────────────────────────────────── + + +def setup_chuk_llm_environment() -> None: + """Set up environment variables for ChukLLM discovery. + + MUST be called before any chuk_llm imports. + """ + _discovery_manager.setup_environment() + + +def trigger_discovery_after_setup() -> int: + """Trigger discovery after environment setup. + + Returns: + Number of new functions discovered """ - Quick function to get available models after discovery. + return _discovery_manager.trigger_discovery() + + +def get_available_models_quick(provider: str | None = None) -> list[str]: + """Quick function to get available models after discovery. Args: - provider: Provider name (default: "ollama") + provider: Provider name (uses DEFAULT_PROVIDER if None) Returns: List of available model names """ + from mcp_cli.config.defaults import DEFAULT_PROVIDER + + provider = provider or DEFAULT_PROVIDER try: from chuk_llm.llm.client import list_available_providers @@ -106,8 +195,7 @@ def get_available_models_quick(provider: str = "ollama") -> list[str]: def validate_provider_exists(provider: str) -> bool: - """ - Validate provider exists, potentially after discovery. 
+ """Validate provider exists, potentially after discovery. Args: provider: Provider name to validate @@ -126,35 +214,18 @@ def validate_provider_exists(provider: str) -> bool: def get_discovery_status() -> dict[str, Any]: - """ - Get discovery status for debugging. + """Get discovery status for debugging. Returns: Dictionary with discovery status information """ - return { - "env_setup_complete": _ENV_SETUP_COMPLETE, - "discovery_triggered": _DISCOVERY_TRIGGERED, - "discovery_enabled": os.getenv("CHUK_LLM_DISCOVERY_ENABLED", "false"), - "ollama_discovery": os.getenv("CHUK_LLM_OLLAMA_DISCOVERY", "false"), - "auto_discover": os.getenv("CHUK_LLM_AUTO_DISCOVER", "false"), - "tool_compatibility": os.getenv("CHUK_LLM_OPENAI_TOOL_COMPATIBILITY", "false"), - "universal_tools": os.getenv("CHUK_LLM_UNIVERSAL_TOOLS", "false"), - } + return _discovery_manager.get_status() def force_discovery_refresh() -> int: - """ - Force a fresh discovery (useful for debugging). + """Force a fresh discovery (useful for debugging). Returns: Number of new functions discovered """ - global _DISCOVERY_TRIGGERED - _DISCOVERY_TRIGGERED = False - - # Set force refresh environment variable - os.environ["CHUK_LLM_DISCOVERY_FORCE_REFRESH"] = "true" - - # Trigger discovery again - return trigger_discovery_after_setup() + return _discovery_manager.force_refresh() diff --git a/src/mcp_cli/config/discovery_models.py b/src/mcp_cli/config/discovery_models.py new file mode 100644 index 00000000..a291d35d --- /dev/null +++ b/src/mcp_cli/config/discovery_models.py @@ -0,0 +1,90 @@ +"""Pydantic models for discovery configuration - no more dict goop!""" + +from __future__ import annotations + +from pydantic import BaseModel, Field + +from mcp_cli.config.env_vars import EnvVar, get_env_bool + + +class DiscoveryConfig(BaseModel): + """ChukLLM discovery configuration loaded from environment. + + All discovery settings in one type-safe, immutable model. 
+ """ + + discovery_enabled: bool = Field( + default=False, + description="Whether discovery is enabled", + ) + ollama_discovery: bool = Field( + default=False, + description="Whether Ollama discovery is enabled", + ) + auto_discover: bool = Field( + default=False, + description="Whether auto-discovery is enabled", + ) + tool_compatibility: bool = Field( + default=False, + description="Whether OpenAI tool compatibility is enabled", + ) + universal_tools: bool = Field( + default=False, + description="Whether universal tools are enabled", + ) + + model_config = {"frozen": True} + + @classmethod + def from_env(cls) -> "DiscoveryConfig": + """Load discovery configuration from environment variables. + + Uses EnvVar enum for type-safe environment access. + """ + return cls( + discovery_enabled=get_env_bool( + EnvVar.CHUK_LLM_DISCOVERY_ENABLED, default=False + ), + ollama_discovery=get_env_bool( + EnvVar.CHUK_LLM_OLLAMA_DISCOVERY, default=False + ), + auto_discover=get_env_bool(EnvVar.CHUK_LLM_AUTO_DISCOVER, default=False), + tool_compatibility=get_env_bool( + EnvVar.CHUK_LLM_OPENAI_TOOL_COMPATIBILITY, default=False + ), + universal_tools=get_env_bool( + EnvVar.CHUK_LLM_UNIVERSAL_TOOLS, default=False + ), + ) + + def to_dict(self) -> dict[str, bool]: + """Convert to dictionary for compatibility.""" + return self.model_dump() # type: ignore[no-any-return] + + +class DiscoveryStatus(BaseModel): + """Discovery status for debugging. + + Includes both runtime state and configuration. 
+ """ + + env_setup_complete: bool + discovery_triggered: bool + config: DiscoveryConfig + + model_config = {"frozen": True} + + def to_dict(self) -> dict[str, bool | dict[str, bool]]: + """Convert to dictionary with flattened config for backward compatibility.""" + return { + "env_setup_complete": self.env_setup_complete, + "discovery_triggered": self.discovery_triggered, + **{f"{k}": v for k, v in self.config.to_dict().items()}, + } + + +__all__ = [ + "DiscoveryConfig", + "DiscoveryStatus", +] diff --git a/src/mcp_cli/config/enums.py b/src/mcp_cli/config/enums.py new file mode 100644 index 00000000..56dbd66b --- /dev/null +++ b/src/mcp_cli/config/enums.py @@ -0,0 +1,159 @@ +"""Configuration enums - no magic strings! + +All enums for type-safe configuration and commands. +""" + +from __future__ import annotations + +from enum import Enum + + +# ================================================================ +# Configuration Enums +# ================================================================ + + +class TimeoutType(str, Enum): + """All timeout configuration types - type-safe timeout keys.""" + + STREAMING_CHUNK = "streaming_chunk" + STREAMING_GLOBAL = "streaming_global" + STREAMING_FIRST_CHUNK = "streaming_first_chunk" + TOOL_EXECUTION = "tool_execution" + SERVER_INIT = "server_init" + HTTP_REQUEST = "http_request" + HTTP_CONNECT = "http_connect" + + +class TokenBackend(str, Enum): + """Token storage backend types.""" + + AUTO = "auto" + KEYCHAIN = "keychain" + WINDOWS = "windows" + SECRET_SERVICE = "secretservice" + ENCRYPTED = "encrypted" + VAULT = "vault" + + +class ConfigSource(str, Enum): + """Configuration value source for priority resolution.""" + + CLI = "cli" + ENV = "env" + FILE = "file" + DEFAULT = "default" + + +# NOTE: Theme names come from chuk-term.ui.theme.Theme +# Valid values: "default", "dark", "light", "minimal", "terminal" +# We don't duplicate them here - use strings and let chuk-term validate + + +# 
================================================================ +# Server/Status Enums +# ================================================================ + + +class ServerStatus(str, Enum): + """Server status values.""" + + CONFIGURED = "configured" + CONNECTED = "connected" + DISCONNECTED = "disconnected" + ERROR = "error" + HEALTHY = "healthy" + UNHEALTHY = "unhealthy" + + +# ================================================================ +# Command Action Enums +# ================================================================ + + +class ConversationAction(str, Enum): + """Actions for /conversation command.""" + + SHOW = "show" + CLEAR = "clear" + SAVE = "save" + LOAD = "load" + + +class TokenAction(str, Enum): + """Actions for /token command.""" + + LIST = "list" + SET = "set" + GET = "get" + DELETE = "delete" + CLEAR = "clear" + BACKENDS = "backends" + SET_PROVIDER = "set-provider" + GET_PROVIDER = "get-provider" + DELETE_PROVIDER = "delete-provider" + + +class OutputFormat(str, Enum): + """Output format types for command results.""" + + JSON = "json" + TABLE = "table" + TEXT = "text" + TREE = "tree" + + +class TokenNamespace(str, Enum): + """Token storage namespaces.""" + + GENERIC = "generic" + PROVIDER = "provider" + BEARER = "bearer" + API_KEY = "api-key" + OAUTH = "oauth" + + +class ServerAction(str, Enum): + """Actions for /server command.""" + + ENABLE = "enable" + DISABLE = "disable" + STATUS = "status" + INFO = "info" + + +class ToolAction(str, Enum): + """Actions for /tools command.""" + + LIST = "list" + ENABLE = "enable" + DISABLE = "disable" + CONFIRM = "confirm" + INFO = "info" + + +class ThemeAction(str, Enum): + """Actions for /theme command.""" + + SET = "set" + LIST = "list" + SHOW = "show" + + +__all__ = [ + # Configuration enums + "TimeoutType", + "TokenBackend", + "ConfigSource", + # Server enums + "ServerStatus", + # Command action enums + "ConversationAction", + "TokenAction", + "ServerAction", + "ToolAction", + "ThemeAction", + # 
Format/Namespace enums + "OutputFormat", + "TokenNamespace", +] diff --git a/src/mcp_cli/config/env_vars.py b/src/mcp_cli/config/env_vars.py new file mode 100644 index 00000000..5e6b0707 --- /dev/null +++ b/src/mcp_cli/config/env_vars.py @@ -0,0 +1,221 @@ +"""Environment variable names - centralized, type-safe, no magic strings! + +All environment variable access should go through this module. +""" + +from __future__ import annotations + +import os +from enum import Enum + + +class EnvVar(str, Enum): + """All environment variable names used by MCP CLI. + + Use these instead of hardcoded strings for type safety. + """ + + # ================================================================ + # Timeout Configuration + # ================================================================ + TOOL_TIMEOUT = "MCP_TOOL_TIMEOUT" + STREAMING_CHUNK_TIMEOUT = "MCP_STREAMING_CHUNK_TIMEOUT" + STREAMING_GLOBAL_TIMEOUT = "MCP_STREAMING_GLOBAL_TIMEOUT" + STREAMING_FIRST_CHUNK_TIMEOUT = "MCP_STREAMING_FIRST_CHUNK_TIMEOUT" + TOOL_EXECUTION_TIMEOUT = "MCP_TOOL_EXECUTION_TIMEOUT" + SERVER_INIT_TIMEOUT = "MCP_SERVER_INIT_TIMEOUT" + + # ================================================================ + # Tool Configuration + # ================================================================ + CLI_INCLUDE_TOOLS = "MCP_CLI_INCLUDE_TOOLS" + CLI_EXCLUDE_TOOLS = "MCP_CLI_EXCLUDE_TOOLS" + CLI_DYNAMIC_TOOLS = "MCP_CLI_DYNAMIC_TOOLS" + CLI_MODIFIED_CONFIG = "MCP_CLI_MODIFIED_CONFIG" + + # ================================================================ + # Token/Auth Configuration + # ================================================================ + CLI_TOKEN_BACKEND = "MCP_CLI_TOKEN_BACKEND" + + # ================================================================ + # LLM Provider Configuration + # ================================================================ + LLM_PROVIDER = "LLM_PROVIDER" + LLM_MODEL = "LLM_MODEL" + PROVIDER = "MCP_PROVIDER" + MODEL = "MCP_MODEL" + + # 
================================================================ + # Paths and Filesystem + # ================================================================ + SOURCE_FILESYSTEMS = "SOURCE_FILESYSTEMS" + + # ================================================================ + # ChukLLM Discovery Configuration + # ================================================================ + CHUK_LLM_DISCOVERY_ENABLED = "CHUK_LLM_DISCOVERY_ENABLED" + CHUK_LLM_OLLAMA_DISCOVERY = "CHUK_LLM_OLLAMA_DISCOVERY" + CHUK_LLM_AUTO_DISCOVER = "CHUK_LLM_AUTO_DISCOVER" + CHUK_LLM_OPENAI_TOOL_COMPATIBILITY = "CHUK_LLM_OPENAI_TOOL_COMPATIBILITY" + CHUK_LLM_UNIVERSAL_TOOLS = "CHUK_LLM_UNIVERSAL_TOOLS" + CHUK_LLM_DISCOVERY_FORCE_REFRESH = "CHUK_LLM_DISCOVERY_FORCE_REFRESH" + + # ================================================================ + # System (typically inherited, not set by MCP CLI) + # ================================================================ + PATH = "PATH" + HOME = "HOME" + USER = "USER" + + +# ================================================================ +# Type-Safe Helper Functions +# ================================================================ + + +def get_env(var: EnvVar, default: str | None = None) -> str | None: + """Get environment variable value (type-safe). + + Args: + var: EnvVar enum member + default: Default value if not set + + Returns: + Environment variable value or default + + Example: + >>> timeout = get_env(EnvVar.TOOL_TIMEOUT, "120") + """ + return os.getenv(var.value, default) + + +def set_env(var: EnvVar, value: str) -> None: + """Set environment variable (type-safe). + + Args: + var: EnvVar enum member + value: Value to set + + Example: + >>> set_env(EnvVar.TOOL_TIMEOUT, "600") + """ + os.environ[var.value] = value + + +def unset_env(var: EnvVar) -> None: + """Unset environment variable if it exists. 
+ + Args: + var: EnvVar enum member + + Example: + >>> unset_env(EnvVar.TOOL_TIMEOUT) + """ + os.environ.pop(var.value, None) + + +def is_set(var: EnvVar) -> bool: + """Check if environment variable is set. + + Args: + var: EnvVar enum member + + Returns: + True if variable is set (even if empty string) + + Example: + >>> if is_set(EnvVar.TOOL_TIMEOUT): + ... print("Timeout is configured") + """ + return var.value in os.environ + + +def get_env_int(var: EnvVar, default: int | None = None) -> int | None: + """Get environment variable as integer. + + Args: + var: EnvVar enum member + default: Default value if not set or invalid + + Returns: + Integer value or default + + Example: + >>> timeout = get_env_int(EnvVar.TOOL_TIMEOUT, 120) + """ + value = get_env(var) + if value is None: + return default + + try: + return int(value) + except ValueError: + return default + + +def get_env_float(var: EnvVar, default: float | None = None) -> float | None: + """Get environment variable as float. + + Args: + var: EnvVar enum member + default: Default value if not set or invalid + + Returns: + Float value or default + + Example: + >>> timeout = get_env_float(EnvVar.TOOL_TIMEOUT, 120.0) + """ + value = get_env(var) + if value is None: + return default + + try: + return float(value) + except ValueError: + return default + + +def get_env_bool(var: EnvVar, default: bool = False) -> bool: + """Get environment variable as boolean. + + Args: + var: EnvVar enum member + default: Default value if not set + + Returns: + Boolean value (true for "1", "true", "yes", case-insensitive) + + Example: + >>> dynamic = get_env_bool(EnvVar.CLI_DYNAMIC_TOOLS, False) + """ + value = get_env(var) + if value is None: + return default + + return value.lower() in ("1", "true", "yes", "on") + + +def get_env_list( + var: EnvVar, separator: str = ",", default: list[str] | None = None +) -> list[str]: + """Get environment variable as list of strings. 
+ + Args: + var: EnvVar enum member + separator: String separator (default: comma) + default: Default value if not set + + Returns: + List of strings (stripped of whitespace) + + Example: + >>> tools = get_env_list(EnvVar.CLI_INCLUDE_TOOLS, default=[]) + # "tool1, tool2, tool3" -> ["tool1", "tool2", "tool3"] + """ + value = get_env(var) + if value is None: + return default or [] + + return [item.strip() for item in value.split(separator) if item.strip()] diff --git a/src/mcp_cli/logging_config.py b/src/mcp_cli/config/logging.py similarity index 99% rename from src/mcp_cli/logging_config.py rename to src/mcp_cli/config/logging.py index 5d91b601..3865b7b4 100644 --- a/src/mcp_cli/logging_config.py +++ b/src/mcp_cli/config/logging.py @@ -1,4 +1,4 @@ -# mcp_cli/logging_config.py +# mcp_cli/config/logging.py """ Centralized logging configuration for MCP CLI. """ diff --git a/src/mcp_cli/config/models.py b/src/mcp_cli/config/models.py new file mode 100644 index 00000000..5ba93995 --- /dev/null +++ b/src/mcp_cli/config/models.py @@ -0,0 +1,228 @@ +"""Clean Pydantic configuration models - async native, type safe.""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any + +from pydantic import BaseModel, Field, field_validator + +from mcp_cli.config.enums import TimeoutType, TokenBackend +from mcp_cli.config.defaults import ( + DEFAULT_HTTP_CONNECT_TIMEOUT, + DEFAULT_HTTP_REQUEST_TIMEOUT, + DEFAULT_SERVER_INIT_TIMEOUT, + DEFAULT_STREAMING_CHUNK_TIMEOUT, + DEFAULT_STREAMING_FIRST_CHUNK_TIMEOUT, + DEFAULT_STREAMING_GLOBAL_TIMEOUT, + DEFAULT_TOOL_EXECUTION_TIMEOUT, + DEFAULT_MAX_TOOL_CONCURRENCY, + DEFAULT_CONFIRM_TOOLS, + DEFAULT_DYNAMIC_TOOLS_ENABLED, + DEFAULT_PROVIDER, + DEFAULT_MODEL, + DEFAULT_THEME, + DEFAULT_VERBOSE, +) + + +class TimeoutConfig(BaseModel): + """Timeout configuration with proper defaults. + + All values in seconds. Immutable after creation. + No more magic numbers! 
+ """ + + streaming_chunk: float = Field( + default=DEFAULT_STREAMING_CHUNK_TIMEOUT, + gt=0, + description="Timeout for each streaming chunk", + ) + streaming_global: float = Field( + default=DEFAULT_STREAMING_GLOBAL_TIMEOUT, + gt=0, + description="Maximum total streaming duration", + ) + streaming_first_chunk: float = Field( + default=DEFAULT_STREAMING_FIRST_CHUNK_TIMEOUT, + gt=0, + description="Timeout for first chunk (may be longer)", + ) + tool_execution: float = Field( + default=DEFAULT_TOOL_EXECUTION_TIMEOUT, + gt=0, + description="Tool execution timeout", + ) + server_init: float = Field( + default=DEFAULT_SERVER_INIT_TIMEOUT, + gt=0, + description="Server initialization timeout", + ) + http_request: float = Field( + default=DEFAULT_HTTP_REQUEST_TIMEOUT, + gt=0, + description="HTTP request timeout", + ) + http_connect: float = Field( + default=DEFAULT_HTTP_CONNECT_TIMEOUT, + gt=0, + description="HTTP connection timeout", + ) + + model_config = {"frozen": True} + + def get(self, timeout_type: TimeoutType) -> float: + """Get timeout by enum (type-safe).""" + return getattr(self, timeout_type.value) # type: ignore[no-any-return] + + async def get_async(self, timeout_type: TimeoutType) -> float: + """Async getter for consistency.""" + return self.get(timeout_type) + + +class ToolConfig(BaseModel): + """Tool behavior configuration. + + No more magic numbers! 
+ """ + + include_tools: list[str] | None = Field( + default=None, + description="Whitelist of tool names (None = all)", + ) + exclude_tools: list[str] | None = Field( + default=None, + description="Blacklist of tool names", + ) + dynamic_tools_enabled: bool = Field( + default=DEFAULT_DYNAMIC_TOOLS_ENABLED, + description="Enable dynamic tool discovery", + ) + confirm_tools: bool = Field( + default=DEFAULT_CONFIRM_TOOLS, + description="Require confirmation before execution", + ) + max_concurrency: int = Field( + default=DEFAULT_MAX_TOOL_CONCURRENCY, + gt=0, + le=100, + description="Max concurrent tool executions", + ) + + model_config = {"frozen": True} + + @field_validator("include_tools", "exclude_tools") + @classmethod + def validate_tool_lists(cls, v: list[str] | None) -> list[str] | None: + """Ensure tool lists are non-empty if provided.""" + if v is not None and len(v) == 0: + return None + return v + + +class VaultConfig(BaseModel): + """HashiCorp Vault configuration.""" + + url: str | None = None + token: str | None = None + mount_point: str = "secret" + path_prefix: str = "mcp-cli/oauth" + namespace: str | None = None + + model_config = {"frozen": True} + + +class TokenStorageConfig(BaseModel): + """Token storage configuration.""" + + backend: TokenBackend = TokenBackend.AUTO + password: str | None = Field(default=None, repr=False) # Don't print passwords + vault: VaultConfig = Field(default_factory=VaultConfig) + + model_config = {"frozen": True} + + +class MCPConfig(BaseModel): + """Complete MCP configuration - clean, immutable, type-safe. + + This is the source of truth loaded from config files. + RuntimeConfig wraps this with CLI/env overrides. + """ + + # Provider/Model defaults (no more magic strings!) 
+ default_provider: str = DEFAULT_PROVIDER + default_model: str = DEFAULT_MODEL + + # UI (theme names validated by chuk-term) + theme: str = DEFAULT_THEME # default|dark|light|minimal|terminal + verbose: bool = DEFAULT_VERBOSE + + # Configurations + timeouts: TimeoutConfig = Field(default_factory=TimeoutConfig) + tools: ToolConfig = Field(default_factory=ToolConfig) + token_storage: TokenStorageConfig = Field(default_factory=TokenStorageConfig) + + # Servers - kept as dict for flexibility but typed + # Use alias to support both 'servers' and 'mcpServers' from config files + servers: dict[str, Any] = Field(default_factory=dict, alias="mcpServers") + + model_config = {"frozen": True, "populate_by_name": True} + + @classmethod + async def load_async(cls, config_path: Path) -> MCPConfig: + """Async load from file (future-proof for async I/O).""" + import asyncio + import json + + if not config_path.exists(): + return cls() + + # Use asyncio for file I/O + loop = asyncio.get_event_loop() + data = await loop.run_in_executor(None, config_path.read_text) + parsed = json.loads(data) + + return cls.model_validate(parsed) # type: ignore[no-any-return] + + @classmethod + def load_sync(cls, config_path: Path) -> MCPConfig: + """Synchronous load (for backward compat during transition).""" + import json + + if not config_path.exists(): + return cls() + + data = json.loads(config_path.read_text()) + return cls.model_validate(data) # type: ignore[no-any-return] + + @classmethod + def load_from_file(cls, config_path: Path) -> MCPConfig: + """Alias for load_sync for backward compatibility.""" + return cls.load_sync(config_path) + + +class ConfigOverride(BaseModel): + """Type-safe configuration override from CLI/env. + + Use this instead of dict[str, Any] for CLI arguments. 
+ """ + + timeouts: dict[TimeoutType, float] = Field(default_factory=dict) + tools: dict[str, Any] = Field(default_factory=dict) + provider: str | None = None + model: str | None = None + theme: str | None = None # Theme name - validated by chuk-term + + model_config = {"frozen": False} # Mutable for building + + def set_timeout(self, timeout_type: TimeoutType, value: float) -> None: + """Type-safe timeout override.""" + if value <= 0: + raise ValueError(f"Timeout must be positive: {value}") + self.timeouts[timeout_type] = value + + def apply_tool_timeout_to_all(self, value: float) -> None: + """Apply single timeout to all relevant types.""" + self.set_timeout(TimeoutType.STREAMING_CHUNK, value) + self.set_timeout(TimeoutType.STREAMING_GLOBAL, value) + self.set_timeout(TimeoutType.TOOL_EXECUTION, value) diff --git a/src/mcp_cli/config/runtime.py b/src/mcp_cli/config/runtime.py new file mode 100644 index 00000000..e9e054f8 --- /dev/null +++ b/src/mcp_cli/config/runtime.py @@ -0,0 +1,250 @@ +"""Runtime configuration resolver - async native, type safe, no magic strings.""" + +from __future__ import annotations + +import logging +from typing import Any, Generic, TypeVar + +from pydantic import BaseModel + +from mcp_cli.config.enums import ConfigSource, TimeoutType +from mcp_cli.config.models import ConfigOverride, MCPConfig, TimeoutConfig, ToolConfig +from mcp_cli.config.env_vars import ( + EnvVar, + get_env, + get_env_bool, + get_env_float, + get_env_list, +) + +logger = logging.getLogger(__name__) + +T = TypeVar("T") + + +class ResolvedValue(BaseModel, Generic[T]): + """A configuration value with its source for debugging.""" + + value: T + source: ConfigSource + + model_config = {"frozen": True, "arbitrary_types_allowed": True} + + +class RuntimeConfig: + """Runtime configuration resolver with 4-tier priority. + + Priority (highest to lowest): + 1. CLI overrides (ConfigOverride) + 2. Environment variables + 3. File config (MCPConfig) + 4. 
Defaults + + Pure async, type-safe, immutable after creation. + """ + + def __init__( + self, + file_config: MCPConfig, + cli_overrides: ConfigOverride | None = None, + ): + """Initialize runtime config. + + Args: + file_config: Loaded configuration from file + cli_overrides: CLI argument overrides (type-safe) + """ + self._file_config = file_config + self._cli_overrides = cli_overrides or ConfigOverride() + + # Cache resolved values + self._timeout_cache: dict[TimeoutType, ResolvedValue[float]] = {} + + # ================================================================ + # Timeout Resolution + # ================================================================ + + def get_timeout(self, timeout_type: TimeoutType) -> float: + """Get timeout with priority resolution (sync).""" + if timeout_type in self._timeout_cache: + return self._timeout_cache[timeout_type].value + + resolved = self._resolve_timeout(timeout_type) + self._timeout_cache[timeout_type] = resolved + return resolved.value + + async def get_timeout_async(self, timeout_type: TimeoutType) -> float: + """Get timeout asynchronously.""" + return self.get_timeout(timeout_type) + + def _resolve_timeout(self, timeout_type: TimeoutType) -> ResolvedValue[float]: + """Resolve timeout from all sources with priority (type-safe!).""" + # 1. Check CLI overrides + if timeout_type in self._cli_overrides.timeouts: + value = self._cli_overrides.timeouts[timeout_type] + logger.debug(f"Timeout {timeout_type.value} from CLI: {value}s") + return ResolvedValue(value=value, source=ConfigSource.CLI) + + # 2. Check environment variables (type-safe with EnvVar!) 
+ # Map timeout types to env vars + env_var_map = { + TimeoutType.STREAMING_CHUNK: EnvVar.STREAMING_CHUNK_TIMEOUT, + TimeoutType.STREAMING_GLOBAL: EnvVar.STREAMING_GLOBAL_TIMEOUT, + TimeoutType.STREAMING_FIRST_CHUNK: EnvVar.STREAMING_FIRST_CHUNK_TIMEOUT, + TimeoutType.TOOL_EXECUTION: EnvVar.TOOL_EXECUTION_TIMEOUT, + TimeoutType.SERVER_INIT: EnvVar.SERVER_INIT_TIMEOUT, + } + + if timeout_type in env_var_map: + env_value = get_env_float(env_var_map[timeout_type]) + if env_value is not None and env_value > 0: + logger.debug(f"Timeout {timeout_type.value} from ENV: {env_value}s") + return ResolvedValue(value=env_value, source=ConfigSource.ENV) + + # Special case: MCP_TOOL_TIMEOUT applies to multiple (type-safe!) + if timeout_type in [ + TimeoutType.STREAMING_CHUNK, + TimeoutType.STREAMING_GLOBAL, + TimeoutType.TOOL_EXECUTION, + ]: + tool_timeout = get_env_float(EnvVar.TOOL_TIMEOUT) + if tool_timeout is not None and tool_timeout > 0: + logger.debug( + f"Timeout {timeout_type.value} from MCP_TOOL_TIMEOUT: {tool_timeout}s" + ) + return ResolvedValue(value=tool_timeout, source=ConfigSource.ENV) + + # 3. 
Check file config + value = self._file_config.timeouts.get(timeout_type) + logger.debug(f"Timeout {timeout_type.value} from config file: {value}s") + return ResolvedValue(value=value, source=ConfigSource.FILE) + + def get_all_timeouts(self) -> TimeoutConfig: + """Get all resolved timeouts as immutable config.""" + return TimeoutConfig( + streaming_chunk=self.get_timeout(TimeoutType.STREAMING_CHUNK), + streaming_global=self.get_timeout(TimeoutType.STREAMING_GLOBAL), + streaming_first_chunk=self.get_timeout(TimeoutType.STREAMING_FIRST_CHUNK), + tool_execution=self.get_timeout(TimeoutType.TOOL_EXECUTION), + server_init=self.get_timeout(TimeoutType.SERVER_INIT), + http_request=self.get_timeout(TimeoutType.HTTP_REQUEST), + http_connect=self.get_timeout(TimeoutType.HTTP_CONNECT), + ) + + async def get_all_timeouts_async(self) -> TimeoutConfig: + """Get all timeouts asynchronously.""" + return self.get_all_timeouts() + + # ================================================================ + # Tool Configuration + # ================================================================ + + def get_tool_config(self) -> ToolConfig: + """Get resolved tool configuration.""" + # CLI overrides take precedence + include_tools = self._get_tool_list("include_tools") + exclude_tools = self._get_tool_list("exclude_tools") + dynamic_enabled = self._get_tool_bool("dynamic_tools_enabled") + confirm_tools = self._get_tool_bool("confirm_tools") + max_concurrency = self._get_tool_int("max_concurrency") + + return ToolConfig( + include_tools=include_tools or self._file_config.tools.include_tools, + exclude_tools=exclude_tools or self._file_config.tools.exclude_tools, + dynamic_tools_enabled=dynamic_enabled + if dynamic_enabled is not None + else self._file_config.tools.dynamic_tools_enabled, + confirm_tools=confirm_tools + if confirm_tools is not None + else self._file_config.tools.confirm_tools, + max_concurrency=max_concurrency or self._file_config.tools.max_concurrency, + ) + + async def 
get_tool_config_async(self) -> ToolConfig: + """Get tool config asynchronously.""" + return self.get_tool_config() + + def _get_tool_list(self, key: str) -> list[str] | None: + """Get tool list from CLI/env (type-safe!).""" + # Check CLI + if key in self._cli_overrides.tools: + return self._cli_overrides.tools[key] # type: ignore[no-any-return] + + # Check environment (type-safe with EnvVar!) + if key == "include_tools": + return get_env_list(EnvVar.CLI_INCLUDE_TOOLS) + elif key == "exclude_tools": + return get_env_list(EnvVar.CLI_EXCLUDE_TOOLS) + + return None + + def _get_tool_bool(self, key: str) -> bool | None: + """Get boolean tool config from CLI/env (type-safe!).""" + # Check CLI + if key in self._cli_overrides.tools: + return bool(self._cli_overrides.tools[key]) + + # Check environment (type-safe with EnvVar!) + if key == "dynamic_tools_enabled": + env_val = get_env(EnvVar.CLI_DYNAMIC_TOOLS) + if env_val is not None: + return get_env_bool(EnvVar.CLI_DYNAMIC_TOOLS) + + return None + + def _get_tool_int(self, key: str) -> int | None: + """Get integer tool config from CLI/env (type-safe!).""" + # Check CLI + if key in self._cli_overrides.tools: + try: + return int(self._cli_overrides.tools[key]) + except (ValueError, TypeError): + pass + + # No env vars for max_concurrency currently + return None + + # ================================================================ + # Provider/Model + # ================================================================ + + @property + def provider(self) -> str: + """Get resolved provider (type-safe!).""" + return ( + self._cli_overrides.provider + or get_env(EnvVar.PROVIDER) + or self._file_config.default_provider + ) + + @property + def model(self) -> str: + """Get resolved model (type-safe!).""" + return ( + self._cli_overrides.model + or get_env(EnvVar.MODEL) + or self._file_config.default_model + ) + + # ================================================================ + # Debug + # 
================================================================ + + def debug_report(self) -> dict[str, Any]: + """Generate debug report showing all resolved values and sources.""" + return { + "timeouts": { + tt.value: { + "value": self._timeout_cache.get( + tt, self._resolve_timeout(tt) + ).value, + "source": self._timeout_cache.get( + tt, self._resolve_timeout(tt) + ).source.value, + } + for tt in TimeoutType + }, + "provider": self.provider, + "model": self.model, + "tools": self.get_tool_config().model_dump(), + } diff --git a/src/mcp_cli/config/server_models.py b/src/mcp_cli/config/server_models.py new file mode 100644 index 00000000..c9e679b5 --- /dev/null +++ b/src/mcp_cli/config/server_models.py @@ -0,0 +1,202 @@ +"""Clean Pydantic models for server configurations - no more dict goop!""" + +from __future__ import annotations + +from typing import Any + +from pydantic import BaseModel, Field, field_validator + + +class HTTPServerConfig(BaseModel): + """HTTP/SSE server configuration.""" + + name: str + url: str + headers: dict[str, str] = Field(default_factory=dict) + disabled: bool = False + + model_config = {"frozen": True} + + @field_validator("url") + @classmethod + def validate_url(cls, v: str) -> str: + """Validate URL format.""" + if not v.startswith(("http://", "https://")): + raise ValueError("URL must start with http:// or https://") + return v + + +class STDIOServerConfig(BaseModel): + """STDIO server configuration.""" + + name: str + command: str + args: list[str] = Field(default_factory=list) + env: dict[str, str] = Field(default_factory=dict) + disabled: bool = False + + model_config = {"frozen": True} + + @field_validator("command") + @classmethod + def validate_command(cls, v: str) -> str: + """Validate command is not empty.""" + if not v or not v.strip(): + raise ValueError("Command cannot be empty") + return v.strip() + + +class OAuthConfig(BaseModel): + """OAuth configuration for HTTP servers.""" + + client_id: str + client_secret: str | 
None = Field(default=None, repr=False) # Don't print secrets + authorization_url: str + token_url: str + scopes: list[str] = Field(default_factory=list) + redirect_uri: str = "http://localhost:8080/callback" + + model_config = {"frozen": True} + + +class UnifiedServerConfig(BaseModel): + """Unified server configuration supporting both HTTP and STDIO transports. + + Exactly one of (url, command) must be provided. + """ + + name: str + + # HTTP/SSE transport + url: str | None = None + headers: dict[str, str] | None = None + oauth: OAuthConfig | None = None + + # STDIO transport + command: str | None = None + args: list[str] = Field(default_factory=list) + env: dict[str, str] = Field(default_factory=dict) + + # Common + disabled: bool = False + + model_config = {"frozen": True} + + @field_validator("name") + @classmethod + def validate_name(cls, v: str) -> str: + """Validate server name.""" + if not v or not v.strip(): + raise ValueError("Server name cannot be empty") + return v.strip() + + @field_validator("url") + @classmethod + def validate_url_format(cls, v: str | None) -> str | None: + """Validate URL format if provided.""" + if v and not v.startswith(("http://", "https://")): + raise ValueError("URL must start with http:// or https://") + return v + + @field_validator("command") + @classmethod + def validate_command_not_empty(cls, v: str | None) -> str | None: + """Validate command is not empty string if provided.""" + if v is not None and not v.strip(): + raise ValueError("Command cannot be empty string") + return v.strip() if v else None + + def model_post_init(self, __context) -> None: + """Validate that exactly one of (url, command) is provided.""" + has_url = self.url is not None + has_command = self.command is not None + + if not has_url and not has_command: + raise ValueError( + f"Server '{self.name}' must have either 'url' (HTTP/SSE) or 'command' (STDIO)" + ) + if has_url and has_command: + raise ValueError( + f"Server '{self.name}' cannot have both 
'url' and 'command' (choose one transport type)" + ) + + def to_http_config(self) -> HTTPServerConfig: + """Convert to HTTP server config (raises if not HTTP).""" + if not self.url: + raise ValueError(f"Server '{self.name}' is not an HTTP server") + return HTTPServerConfig( + name=self.name, + url=self.url, + headers=self.headers or {}, + disabled=self.disabled, + ) + + def to_stdio_config(self) -> STDIOServerConfig: + """Convert to STDIO server config (raises if not STDIO).""" + if not self.command: + raise ValueError(f"Server '{self.name}' is not a STDIO server") + return STDIOServerConfig( + name=self.name, + command=self.command, + args=self.args, + env=self.env, + disabled=self.disabled, + ) + + @property + def is_http(self) -> bool: + """Check if this is an HTTP/SSE server.""" + return self.url is not None + + @property + def is_stdio(self) -> bool: + """Check if this is a STDIO server.""" + return self.command is not None + + +class ServerConfigInput(BaseModel): + """Input model for parsing server configs from dicts. + + This is the mutable version used during parsing. + Convert to UnifiedServerConfig after validation. 
+ """ + + command: str | None = None + args: list[str] = Field(default_factory=list) + env: dict[str, str] = Field(default_factory=dict) + url: str | None = None + headers: dict[str, str] | None = None + oauth: dict[str, Any] | OAuthConfig | None = None + disabled: bool = False + + model_config = {"frozen": False, "extra": "ignore"} + + def to_unified(self, name: str) -> UnifiedServerConfig: + """Convert to immutable UnifiedServerConfig.""" + # Parse OAuth if it's a dict + oauth_config = None + if self.oauth: + if isinstance(self.oauth, dict): + oauth_config = OAuthConfig.model_validate(self.oauth) + else: + oauth_config = self.oauth + + return UnifiedServerConfig( + name=name, + url=self.url, + headers=self.headers, + oauth=oauth_config, + command=self.command, + args=self.args, + env=self.env, + disabled=self.disabled, + ) + + +__all__ = [ + "HTTPServerConfig", + "STDIOServerConfig", + "OAuthConfig", + "UnifiedServerConfig", + "ServerConfigInput", +] diff --git a/src/mcp_cli/constants.py b/src/mcp_cli/constants.py deleted file mode 100644 index caf8ed41..00000000 --- a/src/mcp_cli/constants.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Central constants for MCP CLI.""" - -from importlib.metadata import version, PackageNotFoundError - -# Application namespace for token storage -# This is used to namespace all tokens stored by mcp-cli -# to avoid conflicts with other applications using the same libraries -NAMESPACE = "mcp-cli" - -# Token type namespaces -OAUTH_NAMESPACE = NAMESPACE # OAuth tokens for MCP servers -PROVIDER_NAMESPACE = "provider" # LLM provider API keys -GENERIC_NAMESPACE = "generic" # Generic bearer tokens and API keys - -# Application metadata -APP_NAME = "mcp-cli" - -# Get version from package metadata -try: - APP_VERSION = version("mcp-cli") -except PackageNotFoundError: - # Fallback for development/editable installs - APP_VERSION = "0.0.0-dev" diff --git a/src/mcp_cli/constants/__init__.py b/src/mcp_cli/constants/__init__.py new file mode 100644 index 
00000000..ebf29c17 --- /dev/null +++ b/src/mcp_cli/constants/__init__.py @@ -0,0 +1,118 @@ +"""Constants module - DEPRECATED, use mcp_cli.config instead. + +This module re-exports everything from mcp_cli.config for backwards compatibility. +All new code should import directly from mcp_cli.config. +""" + +# Re-export everything from config for backwards compatibility +from mcp_cli.config import ( + # Application constants + APP_NAME, + APP_VERSION, + GENERIC_NAMESPACE, + NAMESPACE, + OAUTH_NAMESPACE, + PROVIDER_NAMESPACE, + # Timeouts + DEFAULT_HTTP_CONNECT_TIMEOUT, + DEFAULT_HTTP_REQUEST_TIMEOUT, + DISCOVERY_TIMEOUT, + REFRESH_TIMEOUT, + SHUTDOWN_TIMEOUT, + # Platforms + PLATFORM_DARWIN, + PLATFORM_LINUX, + PLATFORM_WINDOWS, + # Providers + PROVIDER_ANTHROPIC, + PROVIDER_DEEPSEEK, + PROVIDER_GROQ, + PROVIDER_OLLAMA, + PROVIDER_OPENAI, + PROVIDER_XAI, + SUPPORTED_PROVIDERS, + # JSON Schema + JSON_TYPE_ARRAY, + JSON_TYPE_BOOLEAN, + JSON_TYPE_INTEGER, + JSON_TYPE_NULL, + JSON_TYPE_NUMBER, + JSON_TYPE_OBJECT, + JSON_TYPE_STRING, + JSON_TYPES, + # Enums + ConversationAction, + OutputFormat, + ServerAction, + ServerStatus, + ThemeAction, + TokenAction, + TokenNamespace, + ToolAction, + # Environment variables + EnvVar, + get_env, + get_env_bool, + get_env_float, + get_env_int, + get_env_list, + is_set, + set_env, + unset_env, +) + +__all__ = [ + # Environment variables + "EnvVar", + "get_env", + "set_env", + "unset_env", + "is_set", + "get_env_int", + "get_env_float", + "get_env_bool", + "get_env_list", + # Enums + "ServerStatus", + "ConversationAction", + "OutputFormat", + "TokenAction", + "TokenNamespace", + "ServerAction", + "ToolAction", + "ThemeAction", + # Timeouts + "DISCOVERY_TIMEOUT", + "REFRESH_TIMEOUT", + "SHUTDOWN_TIMEOUT", + "DEFAULT_HTTP_CONNECT_TIMEOUT", + "DEFAULT_HTTP_REQUEST_TIMEOUT", + # Providers + "PROVIDER_OLLAMA", + "PROVIDER_OPENAI", + "PROVIDER_ANTHROPIC", + "PROVIDER_GROQ", + "PROVIDER_DEEPSEEK", + "PROVIDER_XAI", + "SUPPORTED_PROVIDERS", + # 
Platforms + "PLATFORM_WINDOWS", + "PLATFORM_DARWIN", + "PLATFORM_LINUX", + # JSON Schema types + "JSON_TYPE_STRING", + "JSON_TYPE_NUMBER", + "JSON_TYPE_INTEGER", + "JSON_TYPE_BOOLEAN", + "JSON_TYPE_ARRAY", + "JSON_TYPE_OBJECT", + "JSON_TYPE_NULL", + "JSON_TYPES", + # App constants + "NAMESPACE", + "OAUTH_NAMESPACE", + "PROVIDER_NAMESPACE", + "GENERIC_NAMESPACE", + "APP_NAME", + "APP_VERSION", +] diff --git a/src/mcp_cli/context/context_manager.py b/src/mcp_cli/context/context_manager.py index 2350194b..83fe958c 100644 --- a/src/mcp_cli/context/context_manager.py +++ b/src/mcp_cli/context/context_manager.py @@ -12,6 +12,7 @@ from pydantic import BaseModel, Field, ConfigDict, PrivateAttr, SkipValidation +from mcp_cli.config.defaults import DEFAULT_PROVIDER, DEFAULT_MODEL from mcp_cli.tools.manager import ToolManager from mcp_cli.model_management import ModelManager from mcp_cli.tools.models import ServerInfo, ToolInfo, ConversationMessage @@ -35,8 +36,8 @@ class ApplicationContext(BaseModel): # Configuration config_path: Path = Field(default_factory=lambda: Path("server_config.json")) - provider: str = "openai" - model: str = "gpt-4" + provider: str = DEFAULT_PROVIDER + model: str = DEFAULT_MODEL api_base: str | None = None api_key: str | None = None @@ -90,8 +91,8 @@ def create( context = cls( tool_manager=tool_manager, config_path=config_path or Path("server_config.json"), - provider=provider or "openai", - model=model or "gpt-4", + provider=provider or DEFAULT_PROVIDER, + model=model or DEFAULT_MODEL, **kwargs, ) return context diff --git a/src/mcp_cli/display/__init__.py b/src/mcp_cli/display/__init__.py new file mode 100644 index 00000000..f5ae78c5 --- /dev/null +++ b/src/mcp_cli/display/__init__.py @@ -0,0 +1,69 @@ +"""Unified streaming display system for MCP CLI. 
+ +This module provides a clean, async-native display system using: +- Pydantic models for type-safe state management +- chuk-term for UI rendering +- No dictionary manipulation or magic strings +- Single, unified display path (no fallbacks) + +Also includes formatting and color utilities for UI components. +""" + +from mcp_cli.display.manager import StreamingDisplayManager +from mcp_cli.display.models import ( + ChunkField, + ContentType, + DisplayUpdate, + StreamingChunk, + StreamingPhase, + StreamingState, +) +from mcp_cli.display.formatting import ( + create_tools_table, + create_servers_table, + display_tool_call_result, + format_tool_for_display, +) +from mcp_cli.display.color_converter import ( + rich_to_prompt_toolkit, + create_transparent_completion_style, +) +from mcp_cli.display.formatters import ( + format_args_preview, + format_reasoning_preview, + format_content_preview, +) +from mcp_cli.display.renderers import ( + render_streaming_status, + render_tool_execution_status, + show_final_streaming_response, + show_tool_execution_result, +) + +__all__ = [ + # Core streaming display + "StreamingDisplayManager", + "ChunkField", + "ContentType", + "DisplayUpdate", + "StreamingChunk", + "StreamingPhase", + "StreamingState", + # Table/tool formatting utilities + "create_tools_table", + "create_servers_table", + "display_tool_call_result", + "format_tool_for_display", + # Color utilities + "rich_to_prompt_toolkit", + "create_transparent_completion_style", + # Preview formatters + "format_args_preview", + "format_reasoning_preview", + "format_content_preview", + # Status renderers + "render_streaming_status", + "render_tool_execution_status", + "show_final_streaming_response", + "show_tool_execution_result", +] diff --git a/src/mcp_cli/ui/color_converter.py b/src/mcp_cli/display/color_converter.py similarity index 100% rename from src/mcp_cli/ui/color_converter.py rename to src/mcp_cli/display/color_converter.py diff --git a/src/mcp_cli/display/formatters.py 
b/src/mcp_cli/display/formatters.py new file mode 100644 index 00000000..32227cca --- /dev/null +++ b/src/mcp_cli/display/formatters.py @@ -0,0 +1,195 @@ +"""Preview formatters for display components. + +This module provides formatting utilities for creating inline previews +of tool arguments, reasoning content, and other display elements. +""" + +from __future__ import annotations + +import json +import re +from typing import Any + + +def format_args_preview( + arguments: dict[str, Any], max_args: int = 4, max_len: int = 60 +) -> str: + """Format tool arguments for inline preview. + + Shows first N arguments, truncated to reasonable length. + + Args: + arguments: Tool arguments dict + max_args: Maximum number of arguments to show (default: 4) + max_len: Maximum length for each argument value (default: 60) + + Returns: + Formatted preview string + """ + if not arguments: + return "" + + # Get first N arguments + preview_items = [] + for key, value in list(arguments.items())[:max_args]: + # Format value + if isinstance(value, str): + val_str = value[:max_len] + "..." if len(value) > max_len else value + elif isinstance(value, (dict, list)): + json_str = json.dumps(value) + val_str = ( + json_str[:max_len] + "..." if len(json_str) > max_len else json_str + ) + else: + val_str = str(value)[:max_len] + + preview_items.append(f"{key}={val_str}") + + result = ", ".join(preview_items) + + # Add indicator if more args exist + if len(arguments) > max_args: + result += f" +{len(arguments) - max_args} more" + + return result + + +def format_reasoning_preview( + reasoning: str, max_len: int = 50, from_end: bool = True +) -> str: + """Format reasoning content for inline preview. + + Shows a clean excerpt of the reasoning with proper word boundaries. + By default shows last N chars (most recent thinking). 
+ + Args: + reasoning: Full reasoning content + max_len: Maximum length to show + from_end: Whether to show from end (recent) or beginning + + Returns: + Formatted preview string with clean word boundaries + """ + if not reasoning: + return "" + + # Clean up whitespace aggressively (normalize newlines, tabs, multiple spaces) + cleaned = " ".join(reasoning.split()) + + # Deduplicate repeated sentences within a sliding window (helps with repetitive reasoning) + # Split on sentence boundaries (., !, ?) + + # Split but keep the delimiters + parts = re.split(r"([.!?]\s+)", cleaned) + + sentences: list[str] = [] + current = "" + for i, part in enumerate(parts): + if i % 2 == 0: + # This is text + current = part.strip() + else: + # This is delimiter + if current: + # Check if this sentence appeared in the last 3 sentences (sliding window) + recent_sentences = sentences[-3:] if len(sentences) >= 3 else sentences + if current not in recent_sentences: + sentences.append(current) + current = "" + + # Add last sentence if exists + if current: + recent_sentences = sentences[-3:] if len(sentences) >= 3 else sentences + if current not in recent_sentences: + sentences.append(current) + + # Rejoin with periods + cleaned = ". ".join(sentences) + if reasoning.rstrip().endswith((".", "!", "?")): + cleaned += "." 
+ + if len(cleaned) <= max_len: + return cleaned + + if from_end: + # Strategy: Show an excerpt from 60-70% through the text + # This avoids showing just the very end (which may be repetitive) + # while still showing recent thinking + + if len(cleaned) > max_len * 2: + # For long reasoning, take from 60-70% of the way through + # This balances "recent" with "diverse" + start_pos = int(len(cleaned) * 0.60) + preview = cleaned[start_pos : start_pos + max_len + 60] + else: + # For shorter reasoning, take from end with buffer + buffer_size = 60 + preview = cleaned[-(max_len + buffer_size) :] + + # Find first complete sentence if possible + sentence_starts = [] + for i, char in enumerate(preview): + if i > 0 and i < 60 and char.isupper() and preview[i - 1] in ".!? ": + sentence_starts.append(i) + + # Use last sentence start if available + if sentence_starts: + preview = preview[sentence_starts[-1] :] + else: + # Otherwise find first complete word + first_space = preview.find(" ") + if first_space > 0 and first_space < 40: + preview = preview[first_space + 1 :] + + # Truncate to max_len at sentence or word boundary + if len(preview) > max_len: + preview = preview[:max_len] + # Try sentence ending first + for punct in [". ", "! ", "? "]: + punct_idx = preview.rfind(punct) + if punct_idx > max_len * 0.5: + return f"...{preview[: punct_idx + 1]}" + + # Fall back to word boundary + last_space = preview.rfind(" ") + if last_space > max_len * 0.6: + preview = preview[:last_space] + + return f"...{preview}" + else: + # Show first N chars + preview = cleaned[: max_len + 20] + + # Truncate to max_len at word boundary + if len(preview) > max_len: + preview = preview[:max_len] + last_space = preview.rfind(" ") + if last_space > max_len * 0.7: + preview = preview[:last_space] + + return f"{preview}..." + + +def format_content_preview(content: str, max_len: int = 100) -> str: + """Format content for inline preview. 
+ + Args: + content: Full content + max_len: Maximum length to show + + Returns: + Formatted preview string + """ + if not content: + return "" + + if len(content) <= max_len: + return content + + # Show first N chars, try to break at word boundary + preview = content[:max_len] + space_idx = preview.rfind(" ") + if space_idx > max_len // 2: # Only use word boundary if it's reasonably far + preview = preview[:space_idx] + + return f"{preview}..." diff --git a/src/mcp_cli/ui/formatting.py b/src/mcp_cli/display/formatting.py similarity index 99% rename from src/mcp_cli/ui/formatting.py rename to src/mcp_cli/display/formatting.py index 0bac801a..70e6811d 100644 --- a/src/mcp_cli/ui/formatting.py +++ b/src/mcp_cli/display/formatting.py @@ -1,6 +1,8 @@ # src/mcp_cli/ui/formatting.py """Helper functions for tool display and formatting using chuk-term.""" +import json + from chuk_term.ui import output, format_table from mcp_cli.tools.models import ToolInfo, ServerInfo @@ -85,8 +87,6 @@ def create_servers_table(servers: list[ServerInfo]): def display_tool_call_result(result, console=None): """Display the result of a tool call using chuk-term.""" - import json - if result.success: # Display success header with timing title = f"✓ Tool '{result.tool_name}' completed" @@ -145,8 +145,6 @@ def display_tool_call_result(result, console=None): else: # For other types, format as JSON try: - import json - formatted = json.dumps(result.result, indent=2) if len(formatted) > 500: output.code(formatted[:500] + "\n... (truncated)", language="json") diff --git a/src/mcp_cli/display/manager.py b/src/mcp_cli/display/manager.py new file mode 100644 index 00000000..653bc1a4 --- /dev/null +++ b/src/mcp_cli/display/manager.py @@ -0,0 +1,768 @@ +"""Unified streaming display manager using chuk-term. + +This module provides a clean, async-native streaming display implementation +that uses Pydantic models for state management and chuk-term for UI rendering. 
+ +Replaces the dual display system (ChatDisplayManager + StreamingContext). +""" + +from __future__ import annotations + +import asyncio +import sys +import time +from typing import Protocol, TYPE_CHECKING + +from chuk_term.ui import output + +from mcp_cli.display.models import ( + ContentType, + StreamingChunk, + StreamingState, +) +from mcp_cli.display.renderers import ( + render_streaming_status, + render_tool_execution_status, + show_final_streaming_response, + show_tool_execution_result, +) +from mcp_cli.config.logging import get_logger + +if TYPE_CHECKING: + from chuk_term.ui import LiveStatus + from mcp_cli.chat.models import ToolExecutionState + +logger = get_logger("streaming_display") + + +class DisplayRenderer(Protocol): + """Protocol for rendering different content types.""" + + def render(self, content: str, content_type: ContentType) -> str: + """Render content according to its type.""" + ... + + +class ChukTermRenderer: + """Renderer implementation using chuk-term for display.""" + + def render(self, content: str, content_type: ContentType) -> str: + """Render content with appropriate formatting. + + For now, returns content as-is since chuk-term handles formatting. + Future: Could add syntax highlighting, markdown rendering, etc. + """ + return content + + +class StreamingDisplayManager: + """Unified display manager for streaming responses. + + Uses Pydantic models for state and chuk-term for rendering. + This is the ONLY display system - no fallback paths. + """ + + def __init__(self, renderer: DisplayRenderer | None = None): + """Initialize display manager. 
+ + Args: + renderer: Custom renderer, defaults to ChukTermRenderer + """ + self.renderer = renderer or ChukTermRenderer() + + # Streaming state + self.streaming_state: StreamingState | None = None + + # Tool execution state + self.tool_execution: "ToolExecutionState | None" = None + + # Live status display for tool execution (handles terminal control properly) + # Uses chuk_term.ui.LiveStatus + self._tool_status: "LiveStatus | None" = None + + # Background refresh task + self._refresh_task: asyncio.Task | None = None + self._refresh_active: bool = False + + # Spinner animation + self._spinner_frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"] + self._spinner_index = 0 + + # Reasoning display debouncing + self._last_reasoning_update = 0.0 + self._last_reasoning_chunk_count = 0 + self._last_reasoning_preview = "" # Cached preview text + self._reasoning_update_interval = ( + 2.0 # seconds (increased for less frequent updates) + ) + self._reasoning_chunk_interval = 20 # chunks (increased for less noise) + + # Display state + self._last_status = "" + self._last_line_count = 0 # Track how many lines were last printed + self._showing_thinking = False # Track if we're showing thinking vs content + + # Rendering lock to prevent simultaneous updates + self._render_lock = asyncio.Lock() + + # ==================== STREAMING OPERATIONS ==================== + + async def start_streaming(self) -> None: + """Start a new streaming operation.""" + if self.streaming_state and self.streaming_state.is_active: + logger.warning("Streaming already active, stopping previous stream") + await self.stop_streaming(interrupted=True) + + self.streaming_state = StreamingState() + self._refresh_active = True + await self._start_refresh_loop() + + logger.debug("Started streaming display") + + async def add_chunk(self, raw_chunk: dict) -> None: + """Process and display a new chunk. 
+ + Args: + raw_chunk: Raw chunk data from LLM provider + """ + if not self.streaming_state: + logger.warning("No active streaming state, starting new stream") + await self.start_streaming() + if not self.streaming_state: # Type guard + return + + # Parse chunk into normalized format + chunk = StreamingChunk.from_raw_chunk(raw_chunk) + + # Update state + self.streaming_state.add_chunk(chunk) + + # Refresh display (background task will handle actual rendering) + await self._trigger_refresh() + + async def update_reasoning(self, reasoning: str) -> None: + """Update reasoning content during streaming. + + Args: + reasoning: Reasoning/thinking content from model + """ + if not self.streaming_state: + return + + self.streaming_state.reasoning_content = reasoning + await self._trigger_refresh() + + async def stop_streaming(self, interrupted: bool = False) -> str: + """Stop streaming and return final content. + + Args: + interrupted: Whether streaming was interrupted by user + + Returns: + Final accumulated content + """ + if not self.streaming_state: + logger.warning("No active streaming state to stop") + return "" + + # Mark state as complete + self.streaming_state.complete(interrupted=interrupted) + + # Stop refresh loop + await self._stop_refresh_loop() + + # Finish the live display (clear and reset state) + self._finish_display() + + # Show final output + final_content = self.streaming_state.accumulated_content + elapsed = self.streaming_state.elapsed_time + chunks_received = self.streaming_state.chunks_received + + if final_content: + self._show_final_response(final_content, elapsed, interrupted) + + logger.debug( + f"Stopped streaming: {len(final_content)} chars in {elapsed:.2f}s, " + f"{chunks_received} chunks" + ) + + # Clear streaming state to avoid any interference with subsequent tool execution + # This ensures the display manager is in a clean state for the next operation + self.streaming_state = None + + return final_content + + # ==================== TOOL 
EXECUTION OPERATIONS ==================== + + async def start_tool_execution(self, name: str, arguments: dict) -> None: + """Start displaying tool execution. + + Args: + name: Tool name + arguments: Tool arguments + """ + + from mcp_cli.chat.models import ToolExecutionState + + # Acquire render lock to prevent race conditions during transition + # This ensures the refresh loop doesn't render between clearing and setting tool state + async with self._render_lock: + # If transitioning from streaming to tool execution, clear streaming display + if self.streaming_state and self.streaming_state.is_active: + self._do_clear_display() + + # Clear any stale streaming state (even if not active) + # This ensures we don't have leftover state from previous operations + if self.streaming_state: + self.streaming_state = None + + # Reset ALL display state to ensure clean start + # This is critical after Rich output which may leave cursor in unexpected state + self._last_status = "" + self._last_line_count = 0 + self._showing_thinking = False + self._last_reasoning_preview = "" + + # Set tool execution state while still holding the lock + self.tool_execution = ToolExecutionState( + name=name, arguments=arguments, start_time=time.time() + ) + + # Create live status display for tool execution + # This handles terminal control properly even when other output occurs + from chuk_term.ui import LiveStatus + + self._tool_status = LiveStatus(refresh_per_second=10, transient=True) + self._tool_status.start() + + # Stop any existing refresh loop before starting a new one + # This ensures we don't have competing loops trying to render + if self._refresh_task and not self._refresh_task.done(): + self._refresh_active = False + try: + await asyncio.wait_for(self._refresh_task, timeout=0.5) + except asyncio.TimeoutError: + self._refresh_task.cancel() + try: + await self._refresh_task + except asyncio.CancelledError: + pass + self._refresh_task = None + + self._refresh_active = True + await 
self._start_refresh_loop() + + logger.debug(f"Started tool execution display: {name}") + + def _do_clear_display(self) -> None: + """Internal method to clear display (no lock, called with lock held). + + Clears the current status display. Should only be called when + render lock is already held. + """ + from chuk_term.ui.terminal import clear_lines, move_cursor_up + + if self._last_line_count > 0: + # Move to first line if needed + if self._last_line_count > 1: + move_cursor_up(self._last_line_count - 1) + sys.stdout.write("\r") + sys.stdout.flush() + + # Clear all lines + clear_lines(self._last_line_count) + + # Reset display state (but keep other state intact) + self._last_line_count = 0 + self._last_status = "" + + async def stop_tool_execution(self, result: str, success: bool = True) -> None: + """Stop tool execution display and show result. + + Args: + result: Tool execution result + success: Whether execution succeeded + """ + + if not self.tool_execution: + logger.warning("No active tool execution to stop") + return + + # Update state + elapsed = time.time() - self.tool_execution.start_time + self.tool_execution.result = result + self.tool_execution.success = success + self.tool_execution.elapsed = elapsed + self.tool_execution.completed = True + + # Stop refresh + await self._stop_refresh_loop() + + # Stop live status display + if self._tool_status: + self._tool_status.stop() + self._tool_status = None + + # Show final result (uses Rich output) + self._show_tool_result(self.tool_execution) + + # Ensure stdout is flushed after Rich output + # This helps prevent state issues with subsequent direct stdout writes + sys.stdout.flush() + + self.tool_execution = None + + # ==================== USER MESSAGES ==================== + + def show_user_message(self, message: str) -> None: + """Display a user message. 
+ + Args: + message: User message content + """ + output.print(f"\n👤 User: {message}") + + def show_system_message(self, message: str) -> None: + """Display a system message. + + Args: + message: System message content + """ + output.info(message) + + # ==================== INTERNAL RENDERING ==================== + + async def _start_refresh_loop(self) -> None: + """Start background refresh loop.""" + if self._refresh_task and not self._refresh_task.done(): + return # Already running + + self._refresh_active = True + self._refresh_task = asyncio.create_task(self._refresh_loop()) + logger.debug("Started refresh loop") + + async def _stop_refresh_loop(self) -> None: + """Stop background refresh loop.""" + from mcp_cli.config import REFRESH_TIMEOUT + + self._refresh_active = False + + if self._refresh_task and not self._refresh_task.done(): + try: + await asyncio.wait_for(self._refresh_task, timeout=REFRESH_TIMEOUT) + except asyncio.TimeoutError: + self._refresh_task.cancel() + try: + await self._refresh_task + except asyncio.CancelledError: + pass + + self._refresh_task = None + logger.debug("Stopped refresh loop") + + async def _refresh_loop(self) -> None: + """Background loop that refreshes the display. + + Runs at 10 Hz (100ms interval) for smooth animation. + + Priority order: + 1. Tool execution (highest priority - shows what's actively happening) + 2. 
Streaming status (shows LLM response progress) + """ + try: + while self._refresh_active: + # Tool execution takes priority over streaming + # because we want to show what's actively happening + if self.tool_execution and not self.tool_execution.completed: + await self._render_tool_status() + elif self.streaming_state and self.streaming_state.is_active: + await self._render_streaming_status() + # If neither condition is met, don't render anything + # This prevents stale state from interfering + + await asyncio.sleep(0.1) # 10 Hz refresh + except asyncio.CancelledError: + # Expected when stopping the loop + pass + except Exception as e: + logger.error(f"Error in refresh loop: {e}", exc_info=True) + + async def _trigger_refresh(self) -> None: + """Trigger an immediate refresh (for new content).""" + # The background loop will pick up changes automatically + pass + + async def _render_streaming_status(self) -> None: + """Render current streaming status with debounced reasoning preview.""" + if not self.streaming_state: + return + + # Acquire lock to prevent simultaneous rendering + async with self._render_lock: + # Update spinner + self._spinner_index = (self._spinner_index + 1) % len(self._spinner_frames) + spinner = self._spinner_frames[self._spinner_index] + + # Update cached reasoning preview if debounce allows + self._update_reasoning_preview() + + # Render status using renderer with cached preview + _ = render_streaming_status( + self.streaming_state, + spinner, + reasoning_preview=self._last_reasoning_preview, + ) + + # Determine what mode we're in + has_content = self.streaming_state.content_length > 0 + has_reasoning = bool(self._last_reasoning_preview) + + # Decide what to show: reasoning preview OR content count + if has_content: + # Show content count (no reasoning) + current_mode = "content" + display_status = render_streaming_status( + self.streaming_state, spinner, reasoning_preview="" + ) + num_lines = 1 + elif has_reasoning: + # Show reasoning preview 
(no content yet) - multi-line + current_mode = "thinking" + display_status = self._last_reasoning_preview + # Count lines in preview (header + content lines) + num_lines = display_status.count("\n") + 1 + else: + # Neither - just show basic status + current_mode = "basic" + display_status = render_streaming_status( + self.streaming_state, spinner, reasoning_preview="" + ) + num_lines = 1 + + # Check if we're switching modes (need newline) + mode_switched = (current_mode == "content" and self._showing_thinking) or ( + current_mode == "thinking" + and not self._showing_thinking + and self._last_status + ) + + # Only update if changed + if display_status != self._last_status: + if mode_switched: + # Clear previous display if switching from multi-line thinking + if self._showing_thinking and self._last_line_count > 1: + from chuk_term.ui.terminal import clear_lines + + clear_lines(self._last_line_count) + + # Moving to new mode + if current_mode != "basic": + sys.stdout.write("\n") + else: + # Same mode - clear and update in place + if num_lines > 1: + # Multi-line: clear all previous lines and rewrite + if self._last_line_count > 0: + from chuk_term.ui.terminal import ( + clear_lines, + move_cursor_up, + ) + + # Move to first line + if self._last_line_count > 1: + move_cursor_up(self._last_line_count - 1) + sys.stdout.write("\r") + sys.stdout.flush() + # Clear all lines + clear_lines(self._last_line_count) + else: + # Single line: simple clear and rewrite + # \r = carriage return (go to start of line) + # \033[K = clear from cursor to end of line + sys.stdout.write(f"\r\033[K{display_status}") + sys.stdout.flush() + # Skip the separate write below + self._last_status = display_status + self._last_line_count = num_lines + self._showing_thinking = current_mode == "thinking" + return + + # Write new status (for multi-line or mode-switched cases) + sys.stdout.write(display_status) + sys.stdout.flush() + + # Update state + self._last_status = display_status + 
self._last_line_count = num_lines + self._showing_thinking = current_mode == "thinking" + + def _clear_previous_lines(self) -> None: + """Clear previous status lines from terminal. + + Assumes cursor is at the start of the first line. + """ + if self._last_line_count <= 0: + return + + # Build clear sequence as a single string with ANSI codes + clear_parts = [] + + # Clear all lines (cursor starts at first line) + for i in range(self._last_line_count): + clear_parts.append("\033[K") # Clear current line + if i < self._last_line_count - 1: + clear_parts.append("\n") # Move to next line + + # Move back to first line + if self._last_line_count > 1: + for _ in range(self._last_line_count - 1): + clear_parts.append("\033[A") # Move up + + # Position at start of first line + clear_parts.append("\r") + + # Print everything at once (will be detected as ANSI and written directly) + clear_sequence = "".join(clear_parts) + output.print(clear_sequence, end="") + + async def _clear_current_display_async(self) -> None: + """Clear the current status display without resetting state (async version). + + This is used when transitioning between display modes (e.g., streaming to tool). + Unlike _finish_display(), this doesn't print a newline afterward. + + Must be called from async context to properly acquire render lock. + """ + from chuk_term.ui.terminal import clear_lines, move_cursor_up + + async with self._render_lock: + if self._last_line_count > 0: + # Move to first line if needed + if self._last_line_count > 1: + move_cursor_up(self._last_line_count - 1) + sys.stdout.write("\r") + sys.stdout.flush() + + # Clear all lines + clear_lines(self._last_line_count) + + # Reset display state (but keep other state intact) + self._last_line_count = 0 + self._last_status = "" + + def _clear_current_display(self) -> None: + """Clear the current status display without resetting state (sync version). + + This is used when transitioning between display modes (e.g., streaming to tool). 
+ Unlike _finish_display(), this doesn't print a newline afterward. + + Note: For async contexts, prefer _clear_current_display_async() to properly + coordinate with the render lock. + """ + from chuk_term.ui.terminal import clear_lines, move_cursor_up + + if self._last_line_count > 0: + # Move to first line if needed + if self._last_line_count > 1: + move_cursor_up(self._last_line_count - 1) + sys.stdout.write("\r") + sys.stdout.flush() + + # Clear all lines + clear_lines(self._last_line_count) + + # Reset display state (but keep other state intact) + self._last_line_count = 0 + self._last_status = "" + + def _finish_display(self) -> None: + """Finish the live display and prepare for normal output. + + This clears the current display and resets state so that + subsequent output appears normally without being mangled. + """ + + # Clear current display + self._clear_current_display() + + # Move to a fresh line for subsequent output + # Use direct stdout write instead of print() for consistent behavior + sys.stdout.write("\n") + sys.stdout.flush() + + def _split_preview_into_lines( + self, text: str, max_line_len: int = 80, num_lines: int = 3 + ) -> list[str]: + """Split preview text into multiple lines at word boundaries. + + Args: + text: Text to split (may start with '...') + max_line_len: Maximum length per line + num_lines: Number of lines to create + + Returns: + List of lines (may be fewer than num_lines if text is short) + """ + # Remove leading '...' 
if present for splitting, we'll add it back + has_ellipsis = text.startswith("...") + if has_ellipsis: + text = text[3:].lstrip() + + words = text.split() + lines = [] + current_line: list[str] = [] + current_len = 0 + + for word in words: + word_len = len(word) + (1 if current_line else 0) # +1 for space + + if current_len + word_len > max_line_len and current_line: + # Line is full, save it + lines.append(" ".join(current_line)) + if len(lines) >= num_lines: + break + current_line = [word] + current_len = len(word) + else: + current_line.append(word) + current_len += word_len + + # Add remaining words as last line + if current_line and len(lines) < num_lines: + lines.append(" ".join(current_line)) + + # Add ellipsis to first line if original had it + if lines and has_ellipsis: + lines[0] = f"...{lines[0]}" + + return lines + + def _update_reasoning_preview(self) -> None: + """Update cached reasoning preview with debouncing. + + Updates preview only if: + - 1 second has passed since last update, OR + - 10 chunks have been received since last update + + This keeps the preview visible but only updates the text periodically. 
+ """ + if not self.streaming_state or not self.streaming_state.reasoning_content: + self._last_reasoning_preview = "" + return + + from mcp_cli.display.formatters import format_reasoning_preview + + current_time = time.time() + current_chunks = self.streaming_state.chunks_received + + # Check if we should update (time-based or chunk-based) + should_update = False + + # Check time-based debounce + time_elapsed = current_time - self._last_reasoning_update + if time_elapsed >= self._reasoning_update_interval: + should_update = True + + # Check chunk-based debounce + chunks_since_update = current_chunks - self._last_reasoning_chunk_count + if chunks_since_update >= self._reasoning_chunk_interval: + should_update = True + + # Update cached preview if debounce passed + if should_update: + self._last_reasoning_update = current_time + self._last_reasoning_chunk_count = current_chunks + + # Update cached preview - 3-line format for better context + reasoning_len = len(self.streaming_state.reasoning_content) + + # Format length compactly + if reasoning_len >= 1000: + len_str = f"{reasoning_len / 1000:.1f}k" + else: + len_str = str(reasoning_len) + + # Get a longer preview for 3 lines (~240 chars = 3 x 80 chars per line) + preview_text = format_reasoning_preview( + self.streaming_state.reasoning_content, max_len=240 + ) + + # Split into 3 lines of ~80 chars each + lines = self._split_preview_into_lines( + preview_text, max_line_len=80, num_lines=3 + ) + + # Format as 3-line preview with header + preview_lines = [f"💭 Thinking ({len_str} chars):"] + preview_lines.extend(f" {line}" for line in lines) + + self._last_reasoning_preview = "\n".join(preview_lines) + + async def _render_tool_status(self) -> None: + """Render current tool execution status with argument preview.""" + if not self.tool_execution: + return + + # Acquire lock to prevent simultaneous rendering with streaming + async with self._render_lock: + # Update spinner + self._spinner_index = (self._spinner_index + 1) 
% len(self._spinner_frames) + spinner = self._spinner_frames[self._spinner_index] + + elapsed = time.time() - self.tool_execution.start_time + + # Render status using renderer + status = render_tool_execution_status(self.tool_execution, spinner, elapsed) + + # Only update if changed + if status != self._last_status: + # Update live status display if active + if self._tool_status: + self._tool_status.update(status) + + self._last_status = status + self._last_line_count = 1 # Tool status is always single line + + def _show_final_response( + self, content: str, elapsed: float, interrupted: bool + ) -> None: + """Show final streaming response (delegates to renderer). + + Args: + content: Final content + elapsed: Elapsed time + interrupted: Whether interrupted + """ + show_final_streaming_response(content, elapsed, interrupted) + + def _show_tool_result(self, tool: "ToolExecutionState") -> None: + """Show final tool execution result (delegates to renderer). + + Args: + tool: Tool execution state + """ + + show_tool_execution_result(tool) + + # CRITICAL: After Rich output completes, ensure stdout is flushed and + # terminal is ready for subsequent direct writes. + # Rich may buffer output or leave cursor state ambiguous. 
+ sys.stdout.flush() + + # ==================== STATE QUERIES ==================== + + @property + def is_streaming(self) -> bool: + """Whether currently streaming.""" + return self.streaming_state is not None and self.streaming_state.is_active + + @property + def is_tool_executing(self) -> bool: + """Whether currently executing a tool.""" + return self.tool_execution is not None and not self.tool_execution.completed + + @property + def is_busy(self) -> bool: + """Whether display is currently busy (streaming or executing).""" + return self.is_streaming or self.is_tool_executing diff --git a/src/mcp_cli/display/models.py b/src/mcp_cli/display/models.py new file mode 100644 index 00000000..41a2e210 --- /dev/null +++ b/src/mcp_cli/display/models.py @@ -0,0 +1,328 @@ +"""Pydantic models for streaming display state. + +This module defines type-safe models for all streaming display operations, +eliminating dictionary-based state management and magic strings. +""" + +from __future__ import annotations + +import time +from enum import Enum +from typing import Any +from pydantic import BaseModel, Field + + +class ChunkField(str, Enum): + """Field names for raw streaming chunks from various providers.""" + + # Top-level fields + RESPONSE = "response" + CONTENT = "content" + TEXT = "text" + CHOICES = "choices" + TOOL_CALLS = "tool_calls" + REASONING_CONTENT = "reasoning_content" + FINISH_REASON = "finish_reason" + + # Delta fields (OpenAI/DeepSeek format) + DELTA = "delta" + + +class ContentType(str, Enum): + """Detected content type for appropriate rendering.""" + + TEXT = "text" + CODE = "code" + MARKDOWN = "markdown" + MARKDOWN_TABLE = "markdown_table" + JSON = "json" + SQL = "sql" + MARKUP = "markup" + UNKNOWN = "unknown" + + +class StreamingPhase(str, Enum): + """Current phase of streaming operation.""" + + INITIALIZING = "initializing" + RECEIVING = "receiving" + PROCESSING = "processing" + COMPLETING = "completing" + COMPLETED = "completed" + INTERRUPTED = 
"interrupted" + ERROR = "error" + + +class StreamingChunk(BaseModel): + """A single chunk received from the streaming LLM response. + + This normalizes various chunk formats from different providers + into a consistent structure. + """ + + content: str | None = Field(default=None, description="Text content in this chunk") + tool_calls: list[dict[str, Any]] | None = Field( + default=None, description="Tool call data in this chunk" + ) + reasoning_content: str | None = Field( + default=None, description="Reasoning/thinking content (DeepSeek Reasoner)" + ) + finish_reason: str | None = Field( + default=None, description="Reason for completion if final chunk" + ) + metadata: dict[str, Any] = Field( + default_factory=dict, description="Additional provider-specific metadata" + ) + + model_config = {"frozen": True} + + @classmethod + def from_raw_chunk(cls, chunk: dict[str, Any]) -> StreamingChunk: + """Parse a raw chunk from various LLM providers into normalized format. + + Supports multiple formats: + - chuk-llm format: {'response': str, 'tool_calls': list, ...} + - OpenAI format: {'choices': [{'delta': {...}}], ...} + - Direct format: {'content': str, ...} + """ + # Extract content from various formats + content = None + if ChunkField.RESPONSE in chunk: + content = chunk[ChunkField.RESPONSE] + elif ChunkField.CONTENT in chunk: + content = chunk[ChunkField.CONTENT] + elif ChunkField.TEXT in chunk: + content = chunk[ChunkField.TEXT] + elif ChunkField.DELTA in chunk: + delta = chunk[ChunkField.DELTA] + if isinstance(delta, dict): + content = delta.get(ChunkField.CONTENT) + elif ChunkField.CHOICES in chunk and chunk[ChunkField.CHOICES]: + choice = chunk[ChunkField.CHOICES][0] + if ChunkField.DELTA in choice: + content = choice[ChunkField.DELTA].get(ChunkField.CONTENT) + + # Extract tool calls + tool_calls = chunk.get(ChunkField.TOOL_CALLS) + + # Extract reasoning content (DeepSeek sends in delta) + reasoning = None + if ChunkField.REASONING_CONTENT in chunk: + reasoning = 
chunk[ChunkField.REASONING_CONTENT] + elif ChunkField.CHOICES in chunk and chunk[ChunkField.CHOICES]: + choice = chunk[ChunkField.CHOICES][0] + if ChunkField.DELTA in choice: + delta = choice[ChunkField.DELTA] + if isinstance(delta, dict): + reasoning = delta.get(ChunkField.REASONING_CONTENT) + + # Extract finish reason + finish_reason = chunk.get(ChunkField.FINISH_REASON) + if ( + not finish_reason + and ChunkField.CHOICES in chunk + and chunk[ChunkField.CHOICES] + ): + finish_reason = chunk[ChunkField.CHOICES][0].get(ChunkField.FINISH_REASON) + + return cls( + content=content, + tool_calls=tool_calls, + reasoning_content=reasoning, + finish_reason=finish_reason, + metadata=chunk, + ) + + +class StreamingState(BaseModel): + """Complete state of an active streaming operation. + + This replaces scattered state variables with a single, type-safe model. + """ + + # Content accumulation + accumulated_content: str = Field( + default="", description="All content received so far" + ) + reasoning_content: str = Field( + default="", description="Reasoning/thinking content accumulated" + ) + + # Chunk tracking + chunks_received: int = Field(default=0, description="Total chunks processed") + last_chunk_time: float = Field( + default_factory=time.time, description="Timestamp of last chunk" + ) + + # Content detection + detected_type: ContentType = Field( + default=ContentType.UNKNOWN, description="Detected content type" + ) + + # Phase tracking + phase: StreamingPhase = Field( + default=StreamingPhase.INITIALIZING, description="Current streaming phase" + ) + + # Timing + start_time: float = Field( + default_factory=time.time, description="When streaming started" + ) + end_time: float | None = Field(default=None, description="When streaming completed") + + # Completion + finish_reason: str | None = Field(default=None, description="Why streaming ended") + interrupted: bool = Field(default=False, description="Whether user interrupted") + + model_config = {"frozen": False} + + 
@property + def elapsed_time(self) -> float: + """Calculate elapsed time since streaming started.""" + end = self.end_time if self.end_time else time.time() + return end - self.start_time + + @property + def content_length(self) -> int: + """Total characters in accumulated content.""" + return len(self.accumulated_content) + + @property + def is_active(self) -> bool: + """Whether streaming is currently active.""" + return self.phase in { + StreamingPhase.INITIALIZING, + StreamingPhase.RECEIVING, + StreamingPhase.PROCESSING, + } + + @property + def is_complete(self) -> bool: + """Whether streaming has finished (successfully or not).""" + return self.phase in { + StreamingPhase.COMPLETED, + StreamingPhase.INTERRUPTED, + StreamingPhase.ERROR, + } + + def add_chunk(self, chunk: StreamingChunk) -> None: + """Process a new chunk and update state.""" + self.chunks_received += 1 + self.last_chunk_time = time.time() + + if chunk.content: + self.accumulated_content += chunk.content + self._update_content_type(chunk.content) + + if chunk.reasoning_content: + # Accumulate reasoning content (DeepSeek streams it in chunks) + # Add space if we're appending to existing content to avoid word concatenation + if self.reasoning_content and not self.reasoning_content.endswith(" "): + # Check if the new chunk starts with punctuation or space + if ( + chunk.reasoning_content + and chunk.reasoning_content[0] not in " .,!?;:" + ): + self.reasoning_content += " " + self.reasoning_content += chunk.reasoning_content + + if chunk.finish_reason: + self.finish_reason = chunk.finish_reason + + # Update phase if receiving + if self.phase == StreamingPhase.INITIALIZING: + self.phase = StreamingPhase.RECEIVING + + def _update_content_type(self, new_content: str) -> None: + """Detect and update content type based on accumulated content.""" + if self.detected_type != ContentType.UNKNOWN: + return # Already detected + + full_content = self.accumulated_content + + # Detection logic + if "```" in 
full_content: + self.detected_type = ContentType.CODE + elif self._is_markdown_table(full_content): + self.detected_type = ContentType.MARKDOWN_TABLE + elif "##" in full_content or "###" in full_content: + self.detected_type = ContentType.MARKDOWN + elif any( + x in full_content + for x in ["def ", "function ", "class ", "import ", "const ", "let "] + ): + self.detected_type = ContentType.CODE + elif any( + x in full_content.upper() + for x in ["CREATE TABLE", "SELECT", "INSERT", "UPDATE"] + ): + self.detected_type = ContentType.SQL + elif any(x in full_content for x in ["", "
", "", " bool: + """Check if text contains a markdown table.""" + lines = text.split("\n") + for i, line in enumerate(lines): + if "|" in line and i + 1 < len(lines): + next_line = lines[i + 1] + # Check for separator line like |---|---| + if "|" in next_line and "-" in next_line: + return True + return False + + def complete(self, interrupted: bool = False) -> None: + """Mark streaming as complete.""" + self.end_time = time.time() + self.interrupted = interrupted + if interrupted: + self.phase = StreamingPhase.INTERRUPTED + else: + self.phase = StreamingPhase.COMPLETED + + def mark_error(self) -> None: + """Mark streaming as errored.""" + self.end_time = time.time() + self.phase = StreamingPhase.ERROR + + +class DisplayUpdate(BaseModel): + """An update to send to the display system. + + This represents a single display update operation. + """ + + content: str = Field(description="Content to display") + content_type: ContentType = Field( + default=ContentType.TEXT, description="Type of content" + ) + phase: StreamingPhase = Field(description="Current streaming phase") + chunks_received: int = Field(description="Chunks received so far") + elapsed_time: float = Field(description="Elapsed time in seconds") + reasoning_content: str | None = Field( + default=None, description="Reasoning content to display" + ) + show_spinner: bool = Field( + default=True, description="Whether to show spinner animation" + ) + + model_config = {"frozen": True} + + @classmethod + def from_state(cls, state: StreamingState) -> DisplayUpdate: + """Create a display update from current streaming state.""" + return cls( + content=state.accumulated_content, + content_type=state.detected_type, + phase=state.phase, + chunks_received=state.chunks_received, + elapsed_time=state.elapsed_time, + reasoning_content=state.reasoning_content + if state.reasoning_content + else None, + show_spinner=state.is_active, + ) diff --git a/src/mcp_cli/display/renderers.py b/src/mcp_cli/display/renderers.py new file 
mode 100644 index 00000000..3e9ceb32 --- /dev/null +++ b/src/mcp_cli/display/renderers.py @@ -0,0 +1,207 @@ +"""Status renderers for streaming and tool execution display. + +This module provides rendering functions for displaying streaming status, +tool execution progress, and final results. +""" + +from __future__ import annotations + +import json +import sys +from typing import TYPE_CHECKING + +from chuk_term.ui import output + +from mcp_cli.display.formatters import format_args_preview + +if TYPE_CHECKING: + from mcp_cli.chat.models import ToolExecutionState + from mcp_cli.display.models import StreamingState + + +def render_streaming_status( + state: StreamingState, + spinner: str, + reasoning_preview: str = "", +) -> str: + """Render streaming status with optional reasoning on separate line. + + Args: + state: Current streaming state + spinner: Current spinner frame + reasoning_preview: Pre-formatted reasoning preview string (from cache) + + Returns: + Formatted status string (may be multi-line) + """ + # Build main status line + status_parts = [ + f"{spinner} Streaming", + f"({state.chunks_received} chunks)", + f"{state.content_length} chars", + f"{state.elapsed_time:.1f}s", + ] + + main_line = " · ".join(status_parts) + + # Add reasoning preview on SAME line if provided + if reasoning_preview: + return f"{main_line} | {reasoning_preview}" + + return main_line + + +def render_tool_execution_status( + tool: ToolExecutionState, + spinner: str, + elapsed: float, +) -> str: + """Render tool execution status with argument preview. 
+ + Args: + tool: Tool execution state + spinner: Current spinner frame + elapsed: Elapsed time in seconds + + Returns: + Formatted status string + """ + # Build status with arguments preview - make it more prominent + status_parts = [ + f"{spinner} Executing tool: {tool.name}", + f"({elapsed:.1f}s)", + ] + + # Add arguments preview (show more args for better visibility) + if tool.arguments: + arg_preview = format_args_preview(tool.arguments, max_args=4, max_len=60) + if arg_preview: + # Use pipe separator for clarity + return " ".join(status_parts) + f" | {arg_preview}" + + return " ".join(status_parts) + + +def show_final_streaming_response( + content: str, + elapsed: float, + interrupted: bool, +) -> None: + """Show final streaming response. + + Args: + content: Final content + elapsed: Elapsed time + interrupted: Whether interrupted + """ + # Note: Display is already cleared by manager's _finish_display() + + if interrupted: + output.warning("⚠️ Streaming interrupted") + else: + # Show assistant response + output.print(f"\n🤖 Assistant ({elapsed:.1f}s):") + output.print(content) + + # Ensure all Rich output is flushed and terminal is ready for subsequent writes + sys.stdout.flush() + + +def _sanitize_for_display(text: str) -> str: + """Sanitize text for display by escaping control characters. + + Args: + text: Raw text that may contain control characters + + Returns: + Text with control characters escaped for safe display + """ + # Replace common control characters that could affect terminal state + replacements = { + "\r\n": "\\r\\n", + "\r": "\\r", + "\n": "\\n", + "\t": "\\t", + "\x1b": "\\x1b", # ESC character + } + result = text + for char, escaped in replacements.items(): + result = result.replace(char, escaped) + return result + + +def show_tool_execution_result( + tool: ToolExecutionState, +) -> None: + """Show final tool execution result. 
+ + Args: + tool: Tool execution state + """ + # Note: Display is already cleared by manager's _finish_display() + + if tool.success: + output.success(f"✓ {tool.name} completed in {tool.elapsed:.2f}s") + if tool.result: + # Try to parse as JSON for better formatting + try: + result_obj = json.loads(tool.result) + # If it's a dict or list, show structured preview + if isinstance(result_obj, dict): + # Show top-level keys + keys = list(result_obj.keys())[:5] + keys_str = ", ".join(keys) + if len(result_obj) > 5: + keys_str += f", ... ({len(result_obj)} keys total)" + output.print(f" Result keys: {keys_str}") + + # Show first few items with preview + preview_lines = [] + for i, (k, v) in enumerate(list(result_obj.items())[:3]): + v_str = json.dumps(v) if isinstance(v, (dict, list)) else str(v) + if len(v_str) > 60: + v_str = v_str[:57] + "..." + # Sanitize to escape control characters + v_str = _sanitize_for_display(v_str) + preview_lines.append(f" • {k}: {v_str}") + if preview_lines: + output.print("\n".join(preview_lines)) + + elif isinstance(result_obj, list): + output.print(f" Result: List with {len(result_obj)} items") + # Show first few items + for i, item in enumerate(result_obj[:3]): + item_str = ( + json.dumps(item) + if isinstance(item, (dict, list)) + else str(item) + ) + if len(item_str) > 60: + item_str = item_str[:57] + "..." + # Sanitize to escape control characters + item_str = _sanitize_for_display(item_str) + output.print(f" [{i}] {item_str}") + if len(result_obj) > 3: + output.print(f" ... and {len(result_obj) - 3} more") + else: + # Simple value - sanitize to escape control characters + result_str = _sanitize_for_display(str(result_obj)) + if len(result_str) > 200: + result_str = result_str[:200] + "..." 
+ output.print(f" Result: {result_str}") + except (json.JSONDecodeError, TypeError): + # Not JSON, show as string with preview - sanitize to escape control characters + result_str = _sanitize_for_display(tool.result) + if len(result_str) > 200: + result_str = result_str[:200] + "..." + output.print(f" Result: {result_str}") + else: + output.error(f"✗ {tool.name} failed after {tool.elapsed:.2f}s") + if tool.result: + # Sanitize error output too + error_str = _sanitize_for_display(tool.result) + output.print(f" Error: {error_str}") + + # Ensure all Rich output is flushed and terminal is ready for subsequent writes + # This helps prevent cursor state issues with direct stdout writes that follow + sys.stdout.flush() diff --git a/src/mcp_cli/interactive/commands/__init__.py b/src/mcp_cli/interactive/commands/__init__.py deleted file mode 100644 index 0be24fdc..00000000 --- a/src/mcp_cli/interactive/commands/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# mcp_cli/interactive/commands/__init__.py -"""Interactive commands package.""" diff --git a/src/mcp_cli/interactive/shell.py b/src/mcp_cli/interactive/shell.py index c393ca5d..4c048265 100644 --- a/src/mcp_cli/interactive/shell.py +++ b/src/mcp_cli/interactive/shell.py @@ -15,6 +15,7 @@ from prompt_toolkit.completion import Completer, Completion # mcp cli +from mcp_cli.config.defaults import DEFAULT_PROVIDER, DEFAULT_MODEL from mcp_cli.tools.manager import ToolManager # Use unified command system @@ -24,8 +25,6 @@ ) from mcp_cli.commands import register_all_commands as register_unified_commands -# Keep old registry for now just for command name completion - # logger logger = logging.getLogger(__name__) @@ -49,8 +48,8 @@ def get_completions(self, document, complete_event): async def interactive_mode( stream_manager: Any = None, tool_manager: ToolManager | None = None, - provider: str = "openai", - model: str = "gpt-4o-mini", + provider: str = DEFAULT_PROVIDER, + model: str = DEFAULT_MODEL, server_names: dict[int, str | None] 
| None = None, **kwargs, ) -> bool: diff --git a/src/mcp_cli/llm/content_models.py b/src/mcp_cli/llm/content_models.py new file mode 100644 index 00000000..44b0903c --- /dev/null +++ b/src/mcp_cli/llm/content_models.py @@ -0,0 +1,45 @@ +"""Pydantic models for LLM content blocks - no more dict goop!""" + +from __future__ import annotations + +from enum import Enum +from typing import Literal + +from pydantic import BaseModel + + +class ContentBlockType(str, Enum): + """Content block types - no magic strings!""" + + TEXT = "text" + IMAGE = "image" + + +class TextContent(BaseModel): + """Text content block.""" + + type: Literal["text"] = "text" + text: str + + model_config = {"frozen": True} + + +class ImageContent(BaseModel): + """Image content block.""" + + type: Literal["image"] = "image" + source: dict[str, str] # Could be further typed if needed + + model_config = {"frozen": True} + + +# Type alias for content blocks +ContentBlock = TextContent | ImageContent | dict[str, str] # dict for flexibility + + +__all__ = [ + "ContentBlockType", + "TextContent", + "ImageContent", + "ContentBlock", +] diff --git a/src/mcp_cli/llm/llm_client.py b/src/mcp_cli/llm/llm_client.py deleted file mode 100644 index ec0ee239..00000000 --- a/src/mcp_cli/llm/llm_client.py +++ /dev/null @@ -1,51 +0,0 @@ -# mcp_cli/llm/llm_client.py -""" -from __future__ import annotations - -LLM client interface - compatibility layer and stub for tests -""" - -from typing import Any - -try: - # Try to import from the real chuk-llm if available - from chuk_llm.llm.llm_client import get_llm_client as _real_get_llm_client - - _HAS_CHUK_LLM = True -except ImportError: - _HAS_CHUK_LLM = False - - -class LLMClient: - """Base LLM client interface.""" - - async def create_completion(self, *args, **kwargs) -> str: - """Create a completion using the LLM.""" - raise NotImplementedError("Subclasses must implement create_completion") - - -class StubLLMClient(LLMClient): - """Stub LLM client for testing.""" - - def 
__init__(self, provider: str = "openai", model: str = "gpt-4o-mini"): - self.provider = provider - self.model = model - - async def create_completion(self, *args, **kwargs) -> str: - """Return a test response.""" - return f"Test response from {self.provider} {self.model}" - - -def get_llm_client(provider: str = "openai", model: str | None = None, **kwargs) -> Any: - """ - Get an LLM client instance. - - This function provides compatibility for tests while allowing real usage - when chuk-llm is available. - """ - if _HAS_CHUK_LLM: - # Use real implementation if available - return _real_get_llm_client(provider=provider, model=model, **kwargs) - else: - # Use stub for tests - return StubLLMClient(provider=provider, model=model or "gpt-4o-mini") diff --git a/src/mcp_cli/llm/system_prompt_generator.py b/src/mcp_cli/llm/system_prompt_generator.py deleted file mode 100644 index 94e5e0aa..00000000 --- a/src/mcp_cli/llm/system_prompt_generator.py +++ /dev/null @@ -1,62 +0,0 @@ -# mcp_cli/llm/system_prompt_generator.py -import json - - -class SystemPromptGenerator: - """ - A class for generating system prompts dynamically based on tools JSON and user inputs. - """ - - def __init__(self): - """ - Initialize the SystemPromptGenerator with a default system prompt template. - """ - self.template = """ - In this environment you have access to a set of tools you can use to answer the user's question. - {{ FORMATTING INSTRUCTIONS }} - String and scalar parameters should be specified as is, while lists and objects should use JSON format. Note that spaces for string values are not stripped. The output is not expected to be valid XML and is parsed with regular expressions. - Here are the functions available in JSONSchema format: - {{ TOOL DEFINITIONS IN JSON SCHEMA }} - {{ USER SYSTEM PROMPT }} - {{ TOOL CONFIGURATION }} - """ - self.default_user_system_prompt = "You are an intelligent assistant capable of using tools to solve user queries effectively." 
- self.default_tool_config = "No additional configuration is required." - - def generate_prompt( - self, - tools: dict, - user_system_prompt: str | None = None, - tool_config: str | None = None, - ) -> str: - """ - Generate a system prompt based on the provided tools JSON, user prompt, and tool configuration. - - Args: - tools (dict): The tools JSON containing definitions of the available tools. - user_system_prompt (str): A user-provided description or instruction for the assistant (optional). - tool_config (str): Additional tool configuration information (optional). - - Returns: - str: The dynamically generated system prompt. - """ - - # set the user system prompt - user_system_prompt = user_system_prompt or self.default_user_system_prompt - - # set the tools config - tool_config = tool_config or self.default_tool_config - - # get the tools schema - tools_json_schema = json.dumps(tools, indent=2) - - # perform replacements - prompt: str = self.template.replace( - "{{ TOOL DEFINITIONS IN JSON SCHEMA }}", tools_json_schema - ) - prompt = prompt.replace("{{ FORMATTING INSTRUCTIONS }}", "") - prompt = prompt.replace("{{ USER SYSTEM PROMPT }}", user_system_prompt) - prompt = prompt.replace("{{ TOOL CONFIGURATION }}", tool_config) - - # return the prompt - return prompt diff --git a/src/mcp_cli/llm/tools_handler.py b/src/mcp_cli/llm/tools_handler.py deleted file mode 100644 index da614694..00000000 --- a/src/mcp_cli/llm/tools_handler.py +++ /dev/null @@ -1,179 +0,0 @@ -# mcp_cli/llm/tools_handler.py -from __future__ import annotations - -import json -import logging -import uuid -from typing import Any - -# Import CHUK tool registry for tool conversions - -from mcp_cli.tools.manager import ToolManager -from mcp_cli.tools.models import ToolCallResult - - -def format_tool_response(response_content: list[dict[str, Any]] | Any) -> str: - """Format the response content from a tool. 
- - Preserves structured data in a readable format, ensuring that all data is - available for the model in future conversation turns. - """ - # Handle list of dictionaries (likely structured data like SQL results) - if ( - isinstance(response_content, list) - and response_content - and isinstance(response_content[0], dict) - ): - # Check if this looks like text records with type field - if all( - item.get("type") == "text" for item in response_content if "type" in item - ): - # Text records - extract just the text - return "\n".join( - item.get("text", "No content") - for item in response_content - if item.get("type") == "text" - ) - else: - # This could be data records (like SQL results) - # Return a JSON representation that preserves all data - try: - return json.dumps(response_content, indent=2) - except (TypeError, ValueError): - # Fallback if JSON serialization fails - return str(response_content) - elif isinstance(response_content, dict): - # Single dictionary - return as JSON - try: - return json.dumps(response_content, indent=2) - except (TypeError, ValueError): - return str(response_content) - else: - # Default case - convert to string - return str(response_content) - - -async def handle_tool_call( - tool_call: dict[str, Any] | Any, - conversation_history: list[dict[str, Any]], - tool_manager: ToolManager, -) -> None: - """ - Handle a single tool call using the centralized ToolManager. - - This function updates the conversation history with both the tool call and its response. 
- - Args: - tool_call: The tool call object - conversation_history: The conversation history to update - tool_manager: ToolManager instance for executing tools - """ - tool_name: str = "unknown_tool" - tool_args: dict[str, Any] = {} - tool_call_id: str | None = None - - try: - # Extract tool call information - if hasattr(tool_call, "function"): - tool_name = tool_call.function.name - raw_arguments = tool_call.function.arguments - tool_call_id = getattr(tool_call, "id", None) - elif isinstance(tool_call, dict) and "function" in tool_call: - tool_name = tool_call["function"]["name"] - raw_arguments = tool_call["function"]["arguments"] - tool_call_id = tool_call.get("id") - else: - logging.error("Invalid tool call format") - return - - # Ensure tool arguments are in dictionary form - if isinstance(raw_arguments, str): - try: - tool_args = json.loads(raw_arguments) - except json.JSONDecodeError: - logging.error(f"Failed to parse tool arguments: {raw_arguments}") - tool_args = {} - else: - tool_args = raw_arguments - - # Generate a unique tool call ID if not provided - if not tool_call_id: - tool_call_id = f"call_{tool_name}_{str(uuid.uuid4())[:8]}" - - # Log which tool we're calling - if hasattr(tool_manager, "get_server_for_tool"): - server_name = tool_manager.get_server_for_tool(tool_name) - logging.debug(f"Calling tool '{tool_name}' on server '{server_name}'") - - # Call the tool using ToolManager - result: ToolCallResult = await tool_manager.execute_tool(tool_name, tool_args) - - if not result.success: - error_msg = result.error or "Unknown error" - logging.debug(f"Error calling tool '{tool_name}': {error_msg}") - - # Add failed tool call to conversation history - conversation_history.append( - { - "role": "assistant", - "content": None, - "tool_calls": [ - { - "id": tool_call_id, - "type": "function", - "function": { - "name": tool_name, - "arguments": json.dumps(tool_args), - }, - } - ], - } - ) - - # Add error response - conversation_history.append( - { - 
"role": "tool", - "name": tool_name, - "content": f"Error: {error_msg}", - "tool_call_id": tool_call_id, - } - ) - return - - raw_content = result.result - - # Format the tool response - formatted_response: str = format_tool_response(raw_content) - logging.debug(f"Tool '{tool_name}' Response: {formatted_response}") - - # Append the tool call (for tracking purposes) - conversation_history.append( - { - "role": "assistant", - "content": None, - "tool_calls": [ - { - "id": tool_call_id, - "type": "function", - "function": { - "name": tool_name, - "arguments": json.dumps(tool_args), - }, - } - ], - } - ) - - # Append the tool's response to the conversation history - conversation_history.append( - { - "role": "tool", - "name": tool_name, - "content": formatted_response, - "tool_call_id": tool_call_id, - } - ) - - except Exception as e: - logging.error(f"Error handling tool call '{tool_name}': {str(e)}") diff --git a/src/mcp_cli/main.py b/src/mcp_cli/main.py index 8b6b0d9b..a9297613 100644 --- a/src/mcp_cli/main.py +++ b/src/mcp_cli/main.py @@ -6,6 +6,7 @@ import asyncio import atexit import gc +import logging import os import signal import sys @@ -13,7 +14,7 @@ import typer # Module imports -from mcp_cli.logging_config import ( +from mcp_cli.config.logging import ( setup_logging, get_logger, setup_silent_mcp_environment, @@ -94,27 +95,54 @@ def main_callback( tool_timeout: float | None = typer.Option( None, "--tool-timeout", - help="Tool execution timeout in seconds (default: 120, can also set MCP_TOOL_TIMEOUT env var)", + help="Tool execution timeout in seconds (default: 120, streaming timeout: 300, can also set MCP_TOOL_TIMEOUT env var)", ), token_backend: str | None = typer.Option( None, "--token-backend", help="Token storage backend: auto, keychain, windows, secretservice, encrypted, vault", ), - max_turns: int = typer.Option(30, "--max-turns", help="Maximum conversation turns"), + max_turns: int = typer.Option( + 100, "--max-turns", help="Maximum conversation 
turns" + ), + include_tools: str | None = typer.Option( + None, + "--include-tools", + help="Comma-separated list of tool names to include (filters out all others)", + ), + exclude_tools: str | None = typer.Option( + None, + "--exclude-tools", + help="Comma-separated list of tool names to exclude", + ), + dynamic_tools: bool = typer.Option( + False, + "--dynamic-tools", + help="Enable dynamic tool discovery mode (uses meta-tools for on-demand loading)", + ), ) -> None: """MCP CLI - If no subcommand is given, start chat mode.""" # Re-configure logging based on user options (this overrides the default ERROR level) setup_logging(level=log_level, quiet=quiet, verbose=verbose) - # Store tool timeout if specified + # Store tool timeout if specified (type-safe!) + from mcp_cli.config import EnvVar, set_env + if tool_timeout is not None: - os.environ["MCP_TOOL_TIMEOUT"] = str(tool_timeout) + set_env(EnvVar.TOOL_TIMEOUT, str(tool_timeout)) # Store token backend preference if specified if token_backend: - os.environ["MCP_CLI_TOKEN_BACKEND"] = token_backend + set_env(EnvVar.CLI_TOKEN_BACKEND, token_backend) + + # Store tool filtering options if specified + if include_tools: + set_env(EnvVar.CLI_INCLUDE_TOOLS, include_tools) + if exclude_tools: + set_env(EnvVar.CLI_EXCLUDE_TOOLS, exclude_tools) + if dynamic_tools: + set_env(EnvVar.CLI_DYNAMIC_TOOLS, "1") # Set UI theme and confirmation mode - use preference if not specified from mcp_cli.utils.preferences import get_preference_manager @@ -152,18 +180,15 @@ def main_callback( ) output.info(f"Running: provider {provider}") - # Execute the provider command - from mcp_cli.commands.actions.providers import provider_action_async + # Use unified command system via CLI adapter + from mcp_cli.adapters.cli import cli_execute from mcp_cli.context import initialize_context # Initialize context for the provider command initialize_context(token_backend=token_backend) try: - from mcp_cli.commands.models import ProviderActionParams - - params 
= ProviderActionParams(args=[provider]) - asyncio.run(provider_action_async(params)) + asyncio.run(cli_execute("provider", args=[provider])) except Exception as e: output.error(f"Error: {e}") finally: @@ -266,6 +291,34 @@ def main_callback( quiet=quiet, ) + # NEW: Create clean RuntimeConfig with type-safe overrides + from mcp_cli.config import ConfigOverride, TimeoutType, load_runtime_config + + # Build type-safe CLI overrides + cli_overrides = ConfigOverride() + + if tool_timeout is not None: + # Apply tool_timeout to all related timeouts (type-safe!) + cli_overrides.apply_tool_timeout_to_all(tool_timeout) + logger.debug( + f"Applied --tool-timeout={tool_timeout} to all streaming/tool timeouts" + ) + + if init_timeout != 120.0: + cli_overrides.set_timeout(TimeoutType.SERVER_INIT, init_timeout) + logger.debug(f"Set server init timeout: {init_timeout}s") + + # Load runtime config with file + CLI overrides + runtime_config = load_runtime_config(config_file, cli_overrides) + + # Debug: show what we're using + if verbose or logger.isEnabledFor(logging.DEBUG): + timeouts = runtime_config.get_all_timeouts() + logger.debug( + f"Runtime timeouts: chunk={timeouts.streaming_chunk}s, " + f"global={timeouts.streaming_global}s, tool={timeouts.tool_execution}s" + ) + from mcp_cli.chat.chat_handler import handle_chat_mode # Start chat mode directly with proper cleanup @@ -276,7 +329,7 @@ async def _start_chat(): from mcp_cli.run_command import _init_tool_manager tm = await _init_tool_manager( - config_file, servers, server_names, init_timeout + config_file, servers, server_names, init_timeout, runtime_config ) logger.debug("Starting chat mode handler") @@ -288,6 +341,7 @@ async def _start_chat(): api_key=api_key, max_turns=max_turns, model_manager=model_manager, # FIXED: Pass the model manager with runtime provider + runtime_config=runtime_config, # Pass runtime config with timeout overrides ) logger.debug(f"Chat mode completed with success: {success}") except 
asyncio.TimeoutError: @@ -678,13 +732,13 @@ def _interactive_command( # Shared provider command function def _run_provider_command(args, log_prefix="Provider command"): """Shared function to run provider commands.""" - from mcp_cli.commands.actions.providers import provider_action_async + from mcp_cli.adapters.cli import cli_execute # Initialize context for the provider command initialize_context() try: - asyncio.run(provider_action_async(args)) + asyncio.run(cli_execute("providers", args=args)) except Exception as e: output.error(f"Error: {e}") raise typer.Exit(1) @@ -874,14 +928,15 @@ def tools_command( server, disable_filesystem, provider, model, config_file, quiet=quiet ) - # Import and use the tools action - USE ASYNC VERSION - from mcp_cli.commands.actions.tools import tools_action_async + # Use unified command system via CLI adapter + from mcp_cli.adapters.cli import cli_execute # Execute via run_command_sync with async wrapper async def _tools_wrapper(**params): - return await tools_action_async( - show_details=params.get("all", False), - show_raw=params.get("raw", False), + return await cli_execute( + "tools", + details=params.get("all", False), + raw=params.get("raw", False), ) run_command_sync( @@ -962,13 +1017,15 @@ def servers_command( server, disable_filesystem, provider, model, config_file, quiet=quiet ) - from mcp_cli.commands.actions.servers import servers_action_async + # Use unified command system via CLI adapter + from mcp_cli.adapters.cli import cli_execute async def _servers_wrapper(**params): - return await servers_action_async( + return await cli_execute( + "servers", detailed=params.get("detailed", False), - show_capabilities=params.get("capabilities", False), - show_transport=params.get("transport", False), + capabilities=params.get("capabilities", False), + transport=params.get("transport", False), output_format=params.get("output_format", "table"), ) @@ -1014,10 +1071,11 @@ def resources_command( server, disable_filesystem, provider, 
model, config_file ) - from mcp_cli.commands.resources import resources_action_async + # Use unified command system via CLI adapter + from mcp_cli.adapters.cli import cli_execute async def _resources_wrapper(**params): - return await resources_action_async() + return await cli_execute("resources") run_command_sync( _resources_wrapper, @@ -1055,10 +1113,11 @@ def prompts_command( server, disable_filesystem, provider, model, config_file ) - from mcp_cli.commands.prompts import prompts_action_async + # Use unified command system via CLI adapter + from mcp_cli.adapters.cli import cli_execute async def _prompts_wrapper(**params): - return await prompts_action_async() + return await cli_execute("prompts") run_command_sync( _prompts_wrapper, @@ -1180,10 +1239,20 @@ def theme_command( quiet, verbose, log_level, "default" ) # Start with default theme - from mcp_cli.commands.theme import theme_command as theme_cmd + # Use unified command system via CLI adapter + from mcp_cli.adapters.cli import cli_execute + import asyncio + + async def _theme_wrapper(): + return await cli_execute( + "theme", + theme_name=theme_name, + list_themes=list_themes, + select=select, + ) # Execute theme command - theme_cmd(theme_name, list_themes, select) + asyncio.run(_theme_wrapper()) direct_registered.append("theme") @@ -1235,107 +1304,34 @@ def token_command( # Configure logging for this command _setup_command_logging(quiet, verbose, log_level, "default") - from mcp_cli.commands.actions.token import ( - token_list_action_async, - token_set_action_async, - token_get_action_async, - token_delete_action_async, - token_clear_action_async, - token_backends_action_async, - token_set_provider_action_async, - token_get_provider_action_async, - token_delete_provider_action_async, - ) + # Use unified command system via CLI adapter + from mcp_cli.adapters.cli import cli_execute import asyncio + # Compute default namespace + default_namespace = token_type if token_type in ["bearer", "api-key"] else 
"generic" + async def _token_wrapper(): - if action == "list": - return await token_list_action_async( - namespace=namespace, - show_oauth=show_oauth, - show_bearer=show_bearer, - show_api_keys=show_api_keys, - show_providers=show_providers, - ) - elif action == "set": - if not name: - output.error("Token name is required for 'set' action") - raise typer.Exit(1) - from mcp_cli.commands.models import TokenSetParams - - # Use token_type as default namespace if not specified - default_namespace = ( - token_type if token_type in ["bearer", "api-key"] else "generic" - ) - return await token_set_action_async( - TokenSetParams( - name=name, - token_type=token_type, - value=value, - provider=provider, - namespace=namespace or default_namespace, - ) - ) - elif action == "get": - if not name: - output.error("Token name is required for 'get' action") - raise typer.Exit(1) - # Use token_type as default namespace if not specified - default_namespace = ( - token_type if token_type in ["bearer", "api-key"] else "generic" - ) - return await token_get_action_async( - name=name, - namespace=namespace or default_namespace, - ) - elif action == "delete": - if not name: - output.error("Token name is required for 'delete' action") - raise typer.Exit(1) - from mcp_cli.commands.models import TokenDeleteParams - - # Use token_type as default namespace if not specified - default_namespace = ( - token_type if token_type in ["bearer", "api-key"] else "generic" - ) - return await token_delete_action_async( - TokenDeleteParams( - name=name, - namespace=namespace or default_namespace, - oauth=is_oauth, - ) - ) - elif action == "clear": - return await token_clear_action_async( - namespace=namespace, - force=force, - ) - elif action == "backends": - return await token_backends_action_async() - elif action == "set-provider": - if not name: - output.error("Provider name is required for 'set-provider' action") - raise typer.Exit(1) - return await token_set_provider_action_async( - provider=name, - 
api_key=value, - ) - elif action == "get-provider": - if not name: - output.error("Provider name is required for 'get-provider' action") - raise typer.Exit(1) - return await token_get_provider_action_async(provider=name) - elif action == "delete-provider": - if not name: - output.error("Provider name is required for 'delete-provider' action") - raise typer.Exit(1) - return await token_delete_provider_action_async(provider=name) - else: - output.error(f"Unknown action: {action}") - output.hint( - "Valid actions: list, set, get, delete, clear, backends, set-provider, get-provider, delete-provider" - ) - raise typer.Exit(1) + return await cli_execute( + "token", + action=action, + name=name + if action not in ["set-provider", "get-provider", "delete-provider"] + else None, + value=value, + token_type=token_type, + provider=name + if action in ["set-provider", "get-provider", "delete-provider"] + else provider, + namespace=namespace or default_namespace, + show_oauth=show_oauth, + show_bearer=show_bearer, + show_api_keys=show_api_keys, + show_providers=show_providers, + is_oauth=is_oauth, + force=force, + api_key=value if action == "set-provider" else None, + ) # Run the async function asyncio.run(_token_wrapper()) @@ -1390,125 +1386,35 @@ def tokens_command( # Configure logging for this command _setup_command_logging(quiet, verbose, log_level, "default") - from mcp_cli.commands.actions.token import ( - token_list_action_async, - token_set_action_async, - token_get_action_async, - token_delete_action_async, - token_clear_action_async, - token_backends_action_async, - token_set_provider_action_async, - token_get_provider_action_async, - token_delete_provider_action_async, - ) + # Use unified command system via CLI adapter + from mcp_cli.adapters.cli import cli_execute import asyncio async def _tokens_wrapper(): - # Default to 'list' if no action specified (like providers command) + # Default to 'list' if no action specified effective_action = action or "list" - if 
effective_action == "list": - from mcp_cli.commands.models import TokenListParams - import json - from pathlib import Path - - # Load server names from config - server_names = [] - try: - config_path = Path("server_config.json") - if config_path.exists(): - with open(config_path, "r") as f: - config = json.load(f) - server_names = list(config.get("mcpServers", {}).keys()) - except Exception: - pass # Silently ignore config load errors - - params = TokenListParams( - namespace=namespace, - show_oauth=show_oauth, - show_bearer=show_bearer, - show_api_keys=show_api_keys, - show_providers=show_providers, - server_names=server_names, - ) - return await token_list_action_async(params) - elif effective_action == "set": - if not name: - output.error("Token name is required for 'set' action") - raise typer.Exit(1) - from mcp_cli.commands.models import TokenSetParams - - params = TokenSetParams( - name=name, - token_type=token_type, - value=value, - provider=provider, - namespace=namespace or "generic", - ) - return await token_set_action_async(params) - elif effective_action == "get": - if not name: - output.error("Token name is required for 'get' action") - raise typer.Exit(1) - return await token_get_action_async( - name=name, - namespace=namespace or "generic", - ) - elif effective_action == "delete": - if not name: - output.error("Token name is required for 'delete' action") - raise typer.Exit(1) - from mcp_cli.commands.models import TokenDeleteParams - - params = TokenDeleteParams( - name=name, - namespace=namespace, - oauth=is_oauth, - ) - return await token_delete_action_async(params) - elif effective_action == "clear": - from mcp_cli.commands.models import TokenClearParams - - params = TokenClearParams( - namespace=namespace, - force=force, - ) - return await token_clear_action_async(params) - elif effective_action == "backends": - return await token_backends_action_async() - elif effective_action == "set-provider": - if not name: - output.error("Provider name is 
required for 'set-provider' action") - raise typer.Exit(1) - from mcp_cli.commands.models import TokenProviderParams - - params = TokenProviderParams( - provider=name, - api_key=value, - ) - return await token_set_provider_action_async(params) - elif effective_action == "get-provider": - if not name: - output.error("Provider name is required for 'get-provider' action") - raise typer.Exit(1) - from mcp_cli.commands.models import TokenProviderParams - - params = TokenProviderParams(provider=name) - return await token_get_provider_action_async(params) - elif effective_action == "delete-provider": - if not name: - output.error("Provider name is required for 'delete-provider' action") - raise typer.Exit(1) - from mcp_cli.commands.models import TokenProviderParams - - params = TokenProviderParams(provider=name) - return await token_delete_provider_action_async(params) - else: - output.error(f"Unknown action: {effective_action}") - output.hint( - "Valid actions: list, set, get, delete, clear, backends, set-provider, get-provider, delete-provider" - ) - raise typer.Exit(1) + return await cli_execute( + "token", + action=effective_action, + name=name + if effective_action + not in ["set-provider", "get-provider", "delete-provider"] + else None, + value=value, + token_type=token_type, + provider=name + if effective_action in ["set-provider", "get-provider", "delete-provider"] + else provider, + namespace=namespace or "generic", + show_oauth=show_oauth, + show_bearer=show_bearer, + show_api_keys=show_api_keys, + show_providers=show_providers, + is_oauth=is_oauth, + force=force, + api_key=value if effective_action == "set-provider" else None, + ) # Run the async function asyncio.run(_tokens_wrapper()) @@ -1538,7 +1444,9 @@ def cmd_command( single_turn: bool = typer.Option( False, "--single-turn", help="Disable multi-turn conversation" ), - max_turns: int = typer.Option(30, "--max-turns", help="Maximum conversation turns"), + max_turns: int = typer.Option( + 100, "--max-turns", 
help="Maximum conversation turns" + ), config_file: str = typer.Option( "server_config.json", help="Configuration file path" ), @@ -1589,12 +1497,13 @@ def cmd_command( quiet=quiet, ) - # Import cmd action - from mcp_cli.commands.actions.cmd import cmd_action_async + # Use unified command system via CLI adapter + from mcp_cli.adapters.cli import cli_execute # Execute via run_command_sync async def _cmd_wrapper(**params): - return await cmd_action_async( + return await cli_execute( + "cmd", input_file=params.get("input_file"), output_file=params.get("output_file"), prompt=params.get("prompt"), @@ -1603,7 +1512,7 @@ async def _cmd_wrapper(**params): system_prompt=params.get("system_prompt"), raw=params.get("raw", False), single_turn=params.get("single_turn", False), - max_turns=params.get("max_turns", 30), + max_turns=params.get("max_turns", 100), ) run_command_sync( @@ -1662,17 +1571,13 @@ def ping_command( server, disable_filesystem, provider, model, config_file, quiet=quiet ) - # Import and use the ping action - from mcp_cli.commands.actions.ping import ping_action_async + # Use unified command system via CLI adapter + from mcp_cli.adapters.cli import cli_execute # Wrapper for the async action async def _ping_wrapper(**params): - # Get the tool manager from the global context, which is initialized by run_command_sync - from mcp_cli.context import get_context - tm = get_context().tool_manager - - return await ping_action_async( - tm=tm, + return await cli_execute( + "ping", server_names=params.get("server_names"), targets=params.get("targets", []), ) diff --git a/src/mcp_cli/model_management/client_factory.py b/src/mcp_cli/model_management/client_factory.py index b79d40dd..d9a3a6a2 100644 --- a/src/mcp_cli/model_management/client_factory.py +++ b/src/mcp_cli/model_management/client_factory.py @@ -1,8 +1,4 @@ -# src/mcp_cli/model_management/client_factory.py -""" -from __future__ import annotations - -Client factory for creating LLM clients using chuk_llm. 
+"""mcp_cli.model_management.client_factory - Client factory for creating LLM clients using chuk_llm. This module handles the creation and caching of LLM clients for all providers using chuk_llm's unified client factory. For custom OpenAI-compatible providers, @@ -11,11 +7,16 @@ NO direct OpenAI client creation - everything goes through chuk_llm. """ +from __future__ import annotations + import logging -from typing import Any +from typing import TYPE_CHECKING, Any from mcp_cli.model_management.provider import RuntimeProviderConfig +if TYPE_CHECKING: + from chuk_llm.llm.core.base import BaseLLMClient + logger = logging.getLogger(__name__) @@ -37,7 +38,7 @@ def get_client( model: str | None, config: RuntimeProviderConfig | None = None, chuk_config: Any = None, - ) -> Any: + ) -> "BaseLLMClient": """ Get or create a client for the specified provider and model. diff --git a/src/mcp_cli/model_management/model_manager.py b/src/mcp_cli/model_management/model_manager.py index 87025683..50820484 100644 --- a/src/mcp_cli/model_management/model_manager.py +++ b/src/mcp_cli/model_management/model_manager.py @@ -1,8 +1,4 @@ -# src/mcp_cli/model_management/model_manager.py -""" -from __future__ import annotations - -ModelManager - Clean, type-safe LLM provider and model management. +"""mcp_cli.model_management.model_manager - Clean, type-safe LLM provider and model management. This module provides the main ModelManager class that orchestrates: - Provider discovery and listing (from chuk_llm configuration) @@ -10,15 +6,22 @@ - Runtime provider management (OpenAI-compatible APIs) - Client creation and caching -NO HARDCODED MODELS - All model data comes from: -1. chuk_llm configuration (for standard providers) -2. Runtime provider configs (for custom providers) -3. API discovery (for OpenAI-compatible providers) +NO HARDCODED MODELS OR PROVIDERS - All defaults come from: +1. mcp_cli.config.defaults (for default provider/model) +2. chuk_llm configuration (for standard providers) +3. 
Runtime provider configs (for custom providers) +4. API discovery (for OpenAI-compatible providers) """ +from __future__ import annotations + import logging -from typing import Any +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from chuk_llm.llm.core.base import BaseLLMClient +from mcp_cli.config.defaults import DEFAULT_PROVIDER from mcp_cli.model_management.provider import RuntimeProviderConfig from mcp_cli.model_management.client_factory import ClientFactory from mcp_cli.model_management.provider_discovery import ProviderDiscovery @@ -44,53 +47,34 @@ def __init__(self) -> None: self._active_model: str | None = None self._custom_providers: dict[str, RuntimeProviderConfig] = {} self._client_factory = ClientFactory() - self._discovery_triggered = False self._initialize_chuk_llm() self._load_custom_providers() + # Note: Discovery is NOT triggered automatically - call refresh_models() if needed # ── Initialization ──────────────────────────────────────────────────────── def _initialize_chuk_llm(self) -> None: - """Initialize chuk_llm configuration and trigger discovery.""" + """Initialize chuk_llm configuration.""" try: from chuk_llm.configuration import get_config self._chuk_config = get_config() logger.debug("Loaded chuk_llm configuration") - # Set defaults from chuk_llm + # Use configured default provider (from defaults.py) if self._chuk_config: - self._active_provider = "ollama" # type: ignore[unreachable] # Safe default + self._active_provider = DEFAULT_PROVIDER # type: ignore[unreachable] # Defer model resolution to avoid circular dependencies during __init__ self._active_model = None - # Trigger discovery - self._trigger_discovery() - except Exception as e: logger.error(f"Failed to initialize chuk_llm: {e}") - # Minimal fallback + # Minimal fallback - use configured default self._chuk_config = None - self._active_provider = "ollama" + self._active_provider = DEFAULT_PROVIDER self._active_model = None # Will be determined on first use - def 
_trigger_discovery(self) -> None: - """Trigger model discovery for providers.""" - if self._discovery_triggered: - return - - try: - from chuk_llm.api.providers import trigger_ollama_discovery_and_refresh - - new_functions = trigger_ollama_discovery_and_refresh() - logger.info( - f"ModelManager discovery: {len(new_functions)} new Ollama functions" - ) - self._discovery_triggered = True - except Exception as e: - logger.warning(f"ModelManager discovery failed (continuing anyway): {e}") - def _load_custom_providers(self) -> None: """Load custom providers from preferences.""" try: @@ -122,27 +106,23 @@ def get_available_providers(self) -> list[str]: Get list of all available providers. Returns: - List of provider names + List of provider names (sorted alphabetically) """ - providers = [] + providers: set[str] = set() # Get chuk_llm providers if self._chuk_config: try: # type: ignore[unreachable] all_providers = self._chuk_config.get_all_providers() - # Ollama first, then others alphabetically - if "ollama" in all_providers: - providers.append("ollama") - providers.extend([p for p in sorted(all_providers) if p != "ollama"]) + providers.update(all_providers) except Exception as e: logger.error(f"Failed to get chuk_llm providers: {e}") # Add custom providers - for custom_name in self._custom_providers.keys(): - if custom_name not in providers: - providers.append(custom_name) + providers.update(self._custom_providers.keys()) - return providers if providers else ["ollama"] # Safe fallback + # Return sorted list, with configured default as fallback if empty + return sorted(providers) if providers else [DEFAULT_PROVIDER] def add_runtime_provider( self, @@ -301,14 +281,16 @@ def refresh_models(self, provider: str | None = None) -> int: Manually refresh models for a provider. 
Args: - provider: Provider name (refreshes all if None) + provider: Provider name (refreshes active provider if None) Returns: Number of new models discovered """ + target_provider = provider or self._active_provider + # Check if it's a runtime/custom provider first - if provider and provider in self._custom_providers: - config = self._custom_providers[provider] + if target_provider and target_provider in self._custom_providers: + config = self._custom_providers[target_provider] count = ProviderDiscovery.refresh_provider_models(config) if count is not None: self._client_factory.clear_cache() # Clear cache after refresh @@ -316,28 +298,25 @@ def refresh_models(self, provider: str | None = None) -> int: return 0 # Use chuk_llm refresh for standard providers - try: - if provider == "ollama" or provider is None: - from chuk_llm.api.providers import trigger_ollama_discovery_and_refresh + if not target_provider: + logger.warning("No provider specified for refresh") + return 0 - new_functions = trigger_ollama_discovery_and_refresh() - logger.info(f"Refreshed Ollama: {len(new_functions)} functions") - return len(new_functions) - else: - from chuk_llm.api.providers import refresh_provider_functions + try: + from chuk_llm.api.providers import refresh_provider_functions - new_functions = refresh_provider_functions(provider) - logger.info(f"Refreshed {provider}: {len(new_functions)} functions") - return len(new_functions) + new_functions = refresh_provider_functions(target_provider) + logger.info(f"Refreshed {target_provider}: {len(new_functions)} functions") + return len(new_functions) except Exception as e: - logger.error(f"Failed to refresh models for {provider}: {e}") + logger.error(f"Failed to refresh models for {target_provider}: {e}") return 0 # ── Active Provider/Model Management ────────────────────────────────────── def get_active_provider(self) -> str: """Get the currently active provider.""" - return self._active_provider or "ollama" + return self._active_provider 
or DEFAULT_PROVIDER def get_active_model(self) -> str: """Get the currently active model.""" @@ -364,7 +343,9 @@ def switch_model(self, provider: str, model: str): # ── Client Management ───────────────────────────────────────────────────── - def get_client(self, provider: str | None = None, model: str | None = None) -> Any: + def get_client( + self, provider: str | None = None, model: str | None = None + ) -> "BaseLLMClient": """ Get a client for the specified or active provider/model. diff --git a/src/mcp_cli/model_management/provider_discovery.py b/src/mcp_cli/model_management/provider_discovery.py index 6c11ddbf..3eb13bc3 100644 --- a/src/mcp_cli/model_management/provider_discovery.py +++ b/src/mcp_cli/model_management/provider_discovery.py @@ -1,13 +1,11 @@ -# src/mcp_cli/model_management/provider_discovery.py -""" -from __future__ import annotations - -Provider and model discovery functionality. +"""mcp_cli.model_management.provider_discovery - Provider and model discovery functionality. This module handles discovering models from OpenAI-compatible APIs and refreshing model lists for providers. """ +from __future__ import annotations + import logging from mcp_cli.model_management.discovery import DiscoveryResult @@ -55,7 +53,9 @@ def discover_models_from_api( headers = {"Authorization": f"Bearer {api_key}"} logger.debug(f"Discovering models from {models_url}") - with httpx.Client(timeout=10.0) as client: + from mcp_cli.config import DISCOVERY_TIMEOUT + + with httpx.Client(timeout=DISCOVERY_TIMEOUT) as client: response = client.get(models_url, headers=headers) response.raise_for_status() diff --git a/src/mcp_cli/run_command.py b/src/mcp_cli/run_command.py index b30b3b62..bdb2e90d 100644 --- a/src/mcp_cli/run_command.py +++ b/src/mcp_cli/run_command.py @@ -1,4 +1,4 @@ -# mcp_cli/run_command.py - COMPLETE FIXED VERSION +# mcp_cli/run_command.py """ Main entry-point helpers for all CLI sub-commands. 
@@ -14,25 +14,69 @@ from __future__ import annotations import asyncio -import importlib import logging import sys -from typing import Any, Callable +from typing import TYPE_CHECKING, Any, Callable from pathlib import Path import typer from rich.panel import Panel from chuk_term.ui import output -from mcp_cli.tools.manager import set_tool_manager # only the setter +from mcp_cli.config.defaults import DEFAULT_PROVIDER, DEFAULT_MODEL +from mcp_cli.tools.manager import ToolManager, set_tool_manager from mcp_cli.context import initialize_context +if TYPE_CHECKING: + from mcp_cli.config.runtime import RuntimeConfig + logger = logging.getLogger(__name__) # --------------------------------------------------------------------------- # # internal helpers / globals # # --------------------------------------------------------------------------- # -_ALL_TM: list[Any] = [] # referenced by the unit-tests +_ALL_TM: list[ToolManager] = [] # referenced by the unit-tests + +# Factory function for ToolManager - can be patched in tests +_tool_manager_factory: Callable[..., ToolManager] | None = None + + +def set_tool_manager_factory(factory: Callable[..., ToolManager] | None) -> None: + """Set a custom ToolManager factory for testing. + + Args: + factory: A callable that creates ToolManager instances, or None to reset. + """ + global _tool_manager_factory + _tool_manager_factory = factory + + +def _create_tool_manager( + config_file: str, + servers: list[str], + server_names: dict[int, str | None] | None = None, + initialization_timeout: float = 120.0, + runtime_config: "RuntimeConfig | None" = None, +) -> ToolManager: + """Create a ToolManager instance using factory or default constructor. + + This pattern allows tests to inject mock ToolManagers without dynamic getattr. 
+ """ + if _tool_manager_factory is not None: + return _tool_manager_factory( + config_file, + servers, + server_names, + initialization_timeout=initialization_timeout, + runtime_config=runtime_config, + ) + return ToolManager( + config_file, + servers, + server_names, + initialization_timeout=initialization_timeout, + runtime_config=runtime_config, + ) # --------------------------------------------------------------------------- # @@ -43,19 +87,22 @@ async def _init_tool_manager( servers: list[str], server_names: dict[int, str | None] | None = None, initialization_timeout: float = 120.0, -): + runtime_config: "RuntimeConfig | None" = None, +) -> ToolManager: """ - Dynamically import **ToolManager** (so monkey-patching works) and create it. + Create and initialize a ToolManager instance. + + Uses _create_tool_manager() which can be patched via set_tool_manager_factory() + for testing purposes. + ENHANCED: Automatically selects appropriate namespace based on server type. """ - tm_mod = importlib.import_module("mcp_cli.tools.manager") - ToolManager = getattr(tm_mod, "ToolManager") # patched in tests - - tm = ToolManager( + tm = _create_tool_manager( config_file, servers, server_names, initialization_timeout=initialization_timeout, + runtime_config=runtime_config, ) # ENHANCED: Let ToolManager automatically select the namespace @@ -63,7 +110,7 @@ async def _init_tool_manager( ok = await tm.initialize() # Remove the hardcoded namespace parameter # Clean up any loggers that were created during initialization - from mcp_cli.logging_config import setup_logging + from mcp_cli.config.logging import setup_logging import os log_level = os.environ.get("LOG_LEVEL", "WARNING") @@ -133,8 +180,8 @@ async def run_command( context = initialize_context( tool_manager=tm, config_path=Path(config_file), - provider=(extra_params or {}).get("provider", "openai"), - model=(extra_params or {}).get("model", "gpt-4o-mini"), + provider=(extra_params or {}).get("provider", DEFAULT_PROVIDER), + 
model=(extra_params or {}).get("model", DEFAULT_MODEL), api_base=(extra_params or {}).get("api_base"), api_key=(extra_params or {}).get("api_key"), ) diff --git a/src/mcp_cli/tools/__init__.py b/src/mcp_cli/tools/__init__.py index e69de29b..285af43b 100644 --- a/src/mcp_cli/tools/__init__.py +++ b/src/mcp_cli/tools/__init__.py @@ -0,0 +1 @@ +# mcp_cli/tools/__init__.py diff --git a/src/mcp_cli/tools/config_loader.py b/src/mcp_cli/tools/config_loader.py new file mode 100644 index 00000000..ada4b9a4 --- /dev/null +++ b/src/mcp_cli/tools/config_loader.py @@ -0,0 +1,363 @@ +# mcp_cli/tools/config_loader.py +"""MCP configuration loading and OAuth integration. + +Handles parsing MCP config files, detecting server types, and OAuth token management. +Async-native with proper type safety. +""" + +from __future__ import annotations + +import asyncio +import json +import logging +from typing import TYPE_CHECKING, Any, cast + +from mcp_cli.auth import OAuthHandler, TokenStoreBackend, TokenStoreFactory, StoredToken +from mcp_cli.config.server_models import HTTPServerConfig, STDIOServerConfig +from mcp_cli.tools.models import TransportType + +if TYPE_CHECKING: + pass + +logger = logging.getLogger(__name__) + +# ────────────────────────────────────────────────────────────────────────────── +# Constants - no magic strings! +# ────────────────────────────────────────────────────────────────────────────── +# Legacy format: {{token:provider}} +TOKEN_PLACEHOLDER_PREFIX = "{{token:" +TOKEN_PLACEHOLDER_SUFFIX = "}}" +# New format: ${TOKEN:namespace:name} +TOKEN_ENV_PREFIX = "${TOKEN:" +TOKEN_ENV_SUFFIX = "}" +CONFIG_KEY_MCP_SERVERS = "mcpServers" + + +class ConfigLoader: + """Loads and processes MCP configuration files with OAuth support.""" + + def __init__(self, config_file: str, servers: list[str]) -> None: + """Initialize config loader. 
+ + Args: + config_file: Path to MCP config JSON file + servers: List of server names to load + """ + self.config_file = config_file + self.servers = servers + self._config_cache: dict[str, Any] | None = None + + # Token store for retrieving stored tokens + self._token_store = TokenStoreFactory.create( + backend=TokenStoreBackend.AUTO, + service_name="mcp-cli", + ) + + # Detected servers by transport type + self.http_servers: list[HTTPServerConfig] = [] + self.sse_servers: list[HTTPServerConfig] = [] + self.stdio_servers: list[STDIOServerConfig] = [] + + def load(self) -> dict[str, Any]: + """Load and parse MCP config file with token resolution (sync). + + For async contexts, prefer load_async() to avoid blocking the event loop. + + Returns: + Parsed config dict, or empty dict on error + """ + if self._config_cache: + return self._config_cache + + try: + with open(self.config_file) as f: + config = cast(dict[str, Any], json.load(f)) + + # Resolve {{token:provider}} placeholders + self._resolve_token_placeholders(config) + + self._config_cache = config + return config + + except FileNotFoundError: + logger.warning(f"Config file not found: {self.config_file}") + return {} + except json.JSONDecodeError as e: + logger.error(f"Invalid JSON in config: {e}") + return {} + except Exception as e: + logger.error(f"Error loading config: {e}") + return {} + + async def load_async(self) -> dict[str, Any]: + """Load and parse MCP config file with token resolution (async). + + Uses asyncio.to_thread() to avoid blocking the event loop during file I/O. 
+ + Returns: + Parsed config dict, or empty dict on error + """ + if self._config_cache: + return self._config_cache + + try: + # Use asyncio.to_thread for non-blocking file I/O + def _read_file() -> str: + with open(self.config_file) as f: + return f.read() + + content = await asyncio.to_thread(_read_file) + config = cast(dict[str, Any], json.loads(content)) + + # Resolve {{token:provider}} placeholders + self._resolve_token_placeholders(config) + + self._config_cache = config + return config + + except FileNotFoundError: + logger.warning(f"Config file not found: {self.config_file}") + return {} + except json.JSONDecodeError as e: + logger.error(f"Invalid JSON in config: {e}") + return {} + except Exception as e: + logger.error(f"Error loading config: {e}") + return {} + + def _resolve_token_placeholders(self, config: dict[str, Any]) -> None: + """Replace token placeholders with actual tokens. + + Supports two formats: + - Legacy: {{token:provider}} - for OAuth tokens + - New: ${TOKEN:namespace:name} - for bearer/api-key tokens + """ + + def process_value(value: Any) -> Any: + if isinstance(value, str): + # Handle legacy format: {{token:provider}} + if value.startswith(TOKEN_PLACEHOLDER_PREFIX): + provider = value[ + len(TOKEN_PLACEHOLDER_PREFIX) : -len(TOKEN_PLACEHOLDER_SUFFIX) + ] + try: + # Try to load OAuth tokens + raw_data = self._token_store._retrieve_raw(f"oauth:{provider}") + if raw_data: + stored = StoredToken.model_validate(json.loads(raw_data)) + # OAuth tokens store access_token in data dict + access_token = ( + stored.data.get("access_token") if stored.data else None + ) + if access_token: + return f"Bearer {access_token}" + except Exception as e: + logger.warning(f"Failed to get token for {provider}: {e}") + + # Handle new format: ${TOKEN:namespace:name} + elif value.startswith(TOKEN_ENV_PREFIX): + # Extract namespace:name + inner = value[len(TOKEN_ENV_PREFIX) : -len(TOKEN_ENV_SUFFIX)] + parts = inner.split(":") + if len(parts) >= 2: + namespace = 
parts[0] + name = parts[1] + try: + # Get token from token store using namespace:name format + raw_data = self._token_store._retrieve_raw( + f"{namespace}:{name}" + ) + if raw_data: + stored = StoredToken.model_validate( + json.loads(raw_data) + ) + # Token is in data dict - check for 'token' or 'access_token' + token_value = None + if stored.data: + token_value = stored.data.get( + "token" + ) or stored.data.get("access_token") + if token_value: + logger.debug( + f"Resolved token {namespace}:{name} for env var" + ) + return token_value + else: + logger.warning( + f"Token {namespace}:{name} has no token value in data" + ) + else: + logger.warning(f"Token not found: {namespace}:{name}") + except Exception as e: + logger.warning( + f"Failed to get token {namespace}:{name}: {e}" + ) + + # FIXED: dict/list recursion was unreachable (nested inside `if isinstance(value, str)`) + elif isinstance(value, dict): + return {k: process_value(v) for k, v in value.items()} + elif isinstance(value, list): + return [process_value(item) for item in value] + return value + + # Process entire config using constant + if CONFIG_KEY_MCP_SERVERS in config: + config[CONFIG_KEY_MCP_SERVERS] = process_value( + config[CONFIG_KEY_MCP_SERVERS] + ) + + def detect_server_types(self, config: dict[str, Any]) -> None: + """Detect HTTP/SSE/STDIO servers from config and populate server lists.""" + mcp_servers = config.get(CONFIG_KEY_MCP_SERVERS, {}) + + # Clear existing lists + self.http_servers.clear() + self.sse_servers.clear() + self.stdio_servers.clear() + + for server_name in self.servers: + if server_name not in mcp_servers: + logger.warning(f"Server '{server_name}' not found in config") + continue + + server_cfg = mcp_servers[server_name] + + if "url" in server_cfg: + transport_str = server_cfg.get("transport", "").lower() + http_config = HTTPServerConfig( + name=server_name, + url=server_cfg["url"], + headers=server_cfg.get("headers", {}), + disabled=server_cfg.get("disabled", False), + ) + + 
# Use TransportType enum for comparison - no magic strings! + if TransportType.SSE.value in transport_str: + self.sse_servers.append(http_config) + else: + self.http_servers.append(http_config) + else: + # STDIO server + stdio_config = STDIOServerConfig( + name=server_name, + command=server_cfg.get("command", ""), + args=server_cfg.get("args", []), + env=server_cfg.get("env", {}), + disabled=server_cfg.get("disabled", False), + ) + self.stdio_servers.append(stdio_config) + + def create_oauth_refresh_callback( + self, + http_servers: list[HTTPServerConfig], + sse_servers: list[HTTPServerConfig], + ): + """Create OAuth token refresh callback for StreamManager. + + Args: + http_servers: List of HTTP server configs + sse_servers: List of SSE server configs + + Returns: + Async callback function for token refresh + """ + + async def refresh_oauth_token( + server_url: str | None = None, + ) -> dict[str, str] | None: + """ + Refresh OAuth token for a server and return updated headers. + + Args: + server_url: URL of the server that needs token refresh + + Returns: + Dictionary with updated Authorization header, or None if refresh failed + """ + logger.info(f"OAuth token refresh triggered for URL: {server_url}") + + if not server_url: + logger.warning("Cannot refresh OAuth token: server URL not provided") + return None + + # Map URL back to server name + base_url = server_url.replace("/mcp", "") + server_name = None + + for server_list in [http_servers, sse_servers]: + for server_config in server_list: + config_url = server_config.url.replace("/mcp", "") + if config_url == base_url or server_config.url == server_url: + server_name = server_config.name + break + if server_name: + break + + if not server_name: + logger.error(f"Cannot map URL {server_url} to a known server") + return None + + logger.debug(f"Mapped URL {server_url} to server: {server_name}") + + try: + # Get token store + token_store = TokenStoreFactory.create( + backend=TokenStoreBackend.AUTO, + 
service_name="mcp-cli", + ) + + # Get existing token data from oauth namespace + raw_data = token_store._retrieve_raw(f"oauth:{server_name}") + + if not raw_data: + logger.warning(f"No token found for server: {server_name}") + return None + + stored = StoredToken.model_validate(json.loads(raw_data)) + + # Check if we have a refresh token (stored in data dict) + refresh_token = ( + stored.data.get("refresh_token") if stored.data else None + ) + + if not refresh_token: + logger.warning( + f"No refresh_token available for server: {server_name}, " + "re-authentication required" + ) + return None + + # Attempt to refresh the token + oauth_handler = OAuthHandler(base_url) + + logger.debug(f"Attempting to refresh OAuth token for {server_name}...") + new_tokens = await oauth_handler.refresh_access_token(refresh_token) + + if not new_tokens or "access_token" not in new_tokens: + logger.error(f"Token refresh failed for {server_name}") + return None + + # Store the new tokens + new_stored = StoredToken( + token_type="oauth", + name=server_name, + data={ + "access_token": new_tokens["access_token"], + "refresh_token": new_tokens.get("refresh_token", refresh_token), + }, + ) + token_store._store_raw( + f"oauth:{server_name}", json.dumps(new_stored.model_dump()) + ) + + logger.info(f"OAuth token refreshed successfully for {server_name}") + + return {"Authorization": f"Bearer {new_tokens['access_token']}"} + + except Exception as e: + logger.error( + f"OAuth token refresh failed for {server_name}: {e}", exc_info=True + ) + return None + + return refresh_oauth_token diff --git a/src/mcp_cli/tools/dynamic_tools.py b/src/mcp_cli/tools/dynamic_tools.py new file mode 100644 index 00000000..c17efcdc --- /dev/null +++ b/src/mcp_cli/tools/dynamic_tools.py @@ -0,0 +1,255 @@ +# mcp_cli/tools/dynamic_tools.py +"""Dynamic tools for on-demand tool discovery and binding. 
+ +This module provides dynamic tools that allow the LLM to discover and load +tool schemas on-demand, rather than loading all tools upfront. + +This is a thin wrapper around chuk-tool-processor's BaseDynamicToolProvider, +adding mcp-cli specific features: +- Integration with ToolManager for tool execution +- State-aware search filtering (blocking tools that need computed values) +- Result unwrapping for MCP tool responses +""" + +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING, Any + +from chuk_tool_processor.discovery import ( + BaseDynamicToolProvider, + SearchResult, +) + +# Import directly from chuk-ai-session-manager +from chuk_ai_session_manager.guards import get_tool_state +from mcp_cli.tools.models import ToolInfo + +if TYPE_CHECKING: + from mcp_cli.tools.manager import ToolManager + +logger = logging.getLogger(__name__) + +__all__ = ["DynamicToolProvider"] + + +# Tool metadata: tools that require computed values before they can be called +# These will be hidden/downranked in search results until values exist in state +PARAMETERIZED_TOOLS: dict[str, dict[str, Any]] = { + "normal_cdf": {"requires_computed_values": True, "category": "statistics"}, + "normal_pdf": {"requires_computed_values": True, "category": "statistics"}, + "normal_sf": {"requires_computed_values": True, "category": "statistics"}, + "t_test": {"requires_computed_values": True, "category": "statistics"}, + "chi_square": {"requires_computed_values": True, "category": "statistics"}, + # These compute tools don't require pre-computed values + "sqrt": {"requires_computed_values": False, "category": "compute"}, + "add": {"requires_computed_values": False, "category": "compute"}, + "subtract": {"requires_computed_values": False, "category": "compute"}, + "multiply": {"requires_computed_values": False, "category": "compute"}, + "divide": {"requires_computed_values": False, "category": "compute"}, +} + + +class 
DynamicToolProvider(BaseDynamicToolProvider[ToolInfo]): + """MCP-CLI specific dynamic tool provider. + + Extends BaseDynamicToolProvider with: + - ToolManager integration for MCP tool execution + - State-aware filtering (blocks tools requiring computed values) + - MCP result unwrapping + + ENHANCED: Uses intelligent search engine from chuk-tool-processor with: + - Synonym expansion for natural language queries + - Tokenized OR semantics (partial matches score) + - Fuzzy matching fallback for typos + - Namespace aliasing for flexible tool resolution + - Always returns results (never empty) + """ + + def __init__(self, tool_manager: ToolManager) -> None: + """Initialize with a tool manager. + + Args: + tool_manager: ToolManager instance to query for tools + """ + super().__init__() + self.tool_manager = tool_manager + + # ========================================================================= + # Required implementations from BaseDynamicToolProvider + # ========================================================================= + + async def get_all_tools(self) -> list[ToolInfo]: + """Get all available tools from the tool manager. + + Returns: + List of ToolInfo objects + """ + return await self.tool_manager.get_all_tools() + + async def execute_tool( + self, + tool_name: str, + arguments: dict[str, Any], + ) -> dict[str, Any]: + """Execute a tool via the tool manager. + + Handles MCP-specific result unwrapping. 
+ + Args: + tool_name: Name of the tool to execute + arguments: Arguments to pass to the tool + + Returns: + Execution result dict + """ + try: + result = await self.tool_manager.execute_tool( + tool_name=tool_name, + arguments=arguments, + namespace=None, # Let tool manager figure out the namespace + timeout=None, # Use default timeout + ) + + if result.success: + logger.info( + f"call_tool('{tool_name}') succeeded, " + f"result type: {type(result.result)}" + ) + # Extract and unwrap the actual result value + actual_result = self._unwrap_result(result.result) + + # Format the result for the LLM + try: + formatted_result = self.tool_manager.format_tool_response( + actual_result + ) + logger.debug(f"Formatted result: {formatted_result}") + return { + "success": True, + "result": formatted_result, + } + except Exception as fmt_error: + logger.error(f"Error formatting result: {fmt_error}", exc_info=True) + return { + "success": True, + "result": str(actual_result), + } + else: + logger.warning(f"call_tool('{tool_name}') failed: {result.error}") + return { + "success": False, + "error": result.error or "Tool execution failed", + } + + except Exception as e: + logger.error(f"Error executing tool '{tool_name}': {e}", exc_info=True) + return { + "success": False, + "error": str(e), + } + + # ========================================================================= + # MCP-CLI specific customizations + # ========================================================================= + + def filter_search_results( + self, + results: list[SearchResult[ToolInfo]], + ) -> list[SearchResult[ToolInfo]]: + """Filter search results based on state. + + Parameterized tools (normal_cdf, etc.) are blocked/downranked + until computed values exist in state. 
+ + Args: + results: Search results from the engine + + Returns: + Filtered/modified results + """ + # Check if computed values exist in state + tool_state = get_tool_state() + has_computed_values = bool(tool_state.bindings.bindings) + + filtered: list[SearchResult[ToolInfo]] = [] + + for sr in results: + # Get base tool name for metadata lookup + tool_name = sr.tool.name + base_name = ( + tool_name.split(".")[-1].lower() + if "." in tool_name + else tool_name.lower() + ) + tool_meta = PARAMETERIZED_TOOLS.get(base_name, {}) + + # Check if tool requires computed values but none exist + requires_values = tool_meta.get("requires_computed_values", False) + blocked = requires_values and not has_computed_values + + if blocked: + # Add blocked info to match reasons and heavily penalize score + sr.score *= 0.1 + sr.match_reasons.append("blocked:requires_computed_values") + sr.match_reasons.append( + "hint:Compute values with sqrt, multiply, divide first" + ) + + filtered.append(sr) + + # Re-sort by adjusted score + filtered.sort(key=lambda r: r.score, reverse=True) + return filtered + + def _unwrap_result(self, result: Any) -> Any: + """Unwrap nested ToolResult/dict structures from MCP responses. + + MCP tools can return deeply nested result structures. + This method extracts the actual value. 
+ + Args: + result: Raw result from tool execution + + Returns: + Unwrapped result value + """ + actual_result = result + max_depth = 5 + + for _ in range(max_depth): + # Check for ToolExecutionResult from middleware (has success/error attrs) + if hasattr(actual_result, "success") and hasattr(actual_result, "error"): + if not actual_result.success: + logger.warning( + f"Inner tool execution failed: {actual_result.error}" + ) + # Return the error structure for caller to handle + return actual_result + actual_result = actual_result.result + logger.debug( + f"Unwrapped ToolExecutionResult, new type: {type(actual_result)}" + ) + + # If it's a ToolResult (MCP), extract the result field + elif hasattr(actual_result, "result"): + actual_result = actual_result.result + logger.debug(f"Unwrapped ToolResult, new type: {type(actual_result)}") + + # If it's a dict with 'content' key, extract content + elif isinstance(actual_result, dict) and "content" in actual_result: + actual_result = actual_result["content"] + logger.debug(f"Extracted 'content', new type: {type(actual_result)}") + + # Handle MCP ToolResult object (has .content attribute with list) + elif hasattr(actual_result, "content") and isinstance( + actual_result.content, list + ): + actual_result = actual_result.content + logger.debug( + f"Extracted .content from ToolResult, new type: {type(actual_result)}" + ) + else: + break + + return actual_result diff --git a/src/mcp_cli/tools/execution.py b/src/mcp_cli/tools/execution.py new file mode 100644 index 00000000..a299cb44 --- /dev/null +++ b/src/mcp_cli/tools/execution.py @@ -0,0 +1,205 @@ +# mcp_cli/tools/execution.py +"""Parallel and streaming tool execution utilities. + +Provides async-native parallel execution with callbacks for tool calls. +Uses chuk-tool-processor's ToolCall/ToolResult models. 
+""" + +from __future__ import annotations + +import asyncio +import logging +import os +import platform +from collections.abc import AsyncIterator, Awaitable, Callable +from datetime import UTC, datetime +from typing import TYPE_CHECKING + +from chuk_tool_processor import ToolCall as CTPToolCall +from chuk_tool_processor import ToolResult as CTPToolResult + +if TYPE_CHECKING: + from mcp_cli.tools.manager import ToolManager + +logger = logging.getLogger(__name__) + + +async def execute_tools_parallel( + manager: ToolManager, + calls: list[CTPToolCall], + timeout: float | None = None, + on_tool_start: Callable[[CTPToolCall], Awaitable[None]] | None = None, + on_tool_result: Callable[[CTPToolResult], Awaitable[None]] | None = None, + max_concurrency: int = 4, +) -> list[CTPToolResult]: + """ + Execute multiple tool calls in parallel with optional callbacks. + + Uses chuk-tool-processor's ToolCall/ToolResult models for consistency. + Results are returned in completion order (faster tools return first). 
+ + Args: + manager: ToolManager instance to execute tools + calls: List of CTPToolCall objects to execute + timeout: Timeout per tool execution (uses default if not specified) + on_tool_start: Async callback invoked when each tool starts + on_tool_result: Async callback invoked when each tool completes + max_concurrency: Maximum concurrent executions (default: 4) + + Returns: + List of CTPToolResult objects in completion order + """ + if not calls: + return [] + + effective_timeout = timeout or manager.tool_timeout + sem = asyncio.Semaphore(max_concurrency) + results: list[CTPToolResult] = [] + + async def execute_single(call: CTPToolCall) -> CTPToolResult: + """Execute a single tool call with semaphore control.""" + start_time = datetime.now(UTC) + + async with sem: + # Invoke start callback + if on_tool_start: + try: + await on_tool_start(call) + except Exception as e: + logger.warning( + f"on_tool_start callback failed for {call.tool}: {e}" + ) + + # Execute the tool + tool_result = await manager.execute_tool( + call.tool, + call.arguments, + namespace=call.namespace if call.namespace != "default" else None, + timeout=effective_timeout, + ) + + end_time = datetime.now(UTC) + + # Convert ToolCallResult to CTPToolResult + ctp_result = CTPToolResult( + id=call.id, + tool=call.tool, + result=tool_result.result if tool_result.success else None, + error=tool_result.error if not tool_result.success else None, + start_time=start_time, + end_time=end_time, + machine=platform.node(), + pid=os.getpid(), + ) + + # Invoke result callback + if on_tool_result: + try: + await on_tool_result(ctp_result) + except Exception as e: + logger.warning( + f"on_tool_result callback failed for {call.tool}: {e}" + ) + + return ctp_result + + # Create all tasks and execute in parallel + tasks = [asyncio.create_task(execute_single(call)) for call in calls] + + # Use as_completed to get results in completion order + for completed_task in asyncio.as_completed(tasks): + result = await 
completed_task + results.append(result) + + return results + + +async def stream_execute_tools( + manager: ToolManager, + calls: list[CTPToolCall], + timeout: float | None = None, + on_tool_start: Callable[[CTPToolCall], Awaitable[None]] | None = None, + max_concurrency: int = 4, +) -> AsyncIterator[CTPToolResult]: + """ + Execute multiple tool calls in parallel, yielding results as they complete. + + This is the streaming version of execute_tools_parallel - results are + yielded immediately when each tool completes, without waiting for all. + + Args: + manager: ToolManager instance to execute tools + calls: List of CTPToolCall objects to execute + timeout: Timeout per tool execution (uses default if not specified) + on_tool_start: Async callback invoked when each tool starts + max_concurrency: Maximum concurrent executions (default: 4) + + Yields: + CTPToolResult objects as each tool completes (in completion order) + """ + if not calls: + return + + effective_timeout = timeout or manager.tool_timeout + sem = asyncio.Semaphore(max_concurrency) + queue: asyncio.Queue[CTPToolResult] = asyncio.Queue() + + async def execute_single(call: CTPToolCall) -> None: + """Execute a single tool call and put result in queue.""" + start_time = datetime.now(UTC) + + async with sem: + # Invoke start callback + if on_tool_start: + try: + await on_tool_start(call) + except Exception as e: + logger.warning( + f"on_tool_start callback failed for {call.tool}: {e}" + ) + + # Execute the tool + tool_result = await manager.execute_tool( + call.tool, + call.arguments, + namespace=call.namespace if call.namespace != "default" else None, + timeout=effective_timeout, + ) + + end_time = datetime.now(UTC) + + # Convert ToolCallResult to CTPToolResult + ctp_result = CTPToolResult( + id=call.id, + tool=call.tool, + result=tool_result.result if tool_result.success else None, + error=tool_result.error if not tool_result.success else None, + start_time=start_time, + end_time=end_time, + 
machine=platform.node(), + pid=os.getpid(), + ) + + await queue.put(ctp_result) + + # Start all tasks + tasks = {asyncio.create_task(execute_single(call)) for call in calls} + + # Yield results as they complete + results_received = 0 + while results_received < len(calls): + try: + result = await queue.get() + yield result + results_received += 1 + except asyncio.CancelledError: + # Cancel remaining tasks on cancellation + for task in tasks: + if not task.done(): + task.cancel() + break + + # Clean up any remaining tasks + done, pending = await asyncio.wait(tasks, timeout=0) + for task in pending: + task.cancel() diff --git a/src/mcp_cli/tools/filter.py b/src/mcp_cli/tools/filter.py index 29b4469f..ca9a0fe6 100644 --- a/src/mcp_cli/tools/filter.py +++ b/src/mcp_cli/tools/filter.py @@ -1,19 +1,66 @@ # mcp_cli/tools/filter.py -""" -from __future__ import annotations +"""Tool filtering and management system - async native, pydantic native! -Tool filtering and management system. AGGRESSIVE AUTO-FIX: Always attempt to fix tools before validation. 
""" +from __future__ import annotations + import logging +from enum import Enum from typing import Any +from pydantic import BaseModel, Field + from mcp_cli.tools.validation import ToolSchemaValidator logger = logging.getLogger(__name__) +class DisabledReason(str, Enum): + """Reason why a tool was disabled - no magic strings!""" + + VALIDATION = "validation" # Failed validation + USER = "user" # Manually disabled by user + UNKNOWN = "unknown" # Unknown reason + + +class FilterStats(BaseModel): + """Auto-fix statistics - no dict goop!""" + + attempted: int = Field(default=0, description="Number of fix attempts") + successful: int = Field(default=0, description="Number of successful fixes") + failed: int = Field(default=0, description="Number of failed fixes") + + model_config = {"frozen": False} + + def increment_attempted(self) -> None: + """Increment attempted counter.""" + self.attempted += 1 + + def increment_successful(self) -> None: + """Increment successful counter.""" + self.successful += 1 + + def increment_failed(self) -> None: + """Increment failed counter.""" + self.failed += 1 + + def reset(self) -> None: + """Reset all counters.""" + self.attempted = 0 + self.successful = 0 + self.failed = 0 + + def to_dict(self) -> dict[str, int]: + """Convert to dict for compatibility.""" + return { + "attempted": self.attempted, + "successful": self.successful, + "failed": self.failed, + } + + class ToolFilter: """Manages tool filtering and disabling based on various criteria.""" @@ -23,20 +70,22 @@ def __init__(self) -> None: self.disabled_by_user: set[str] = set() self.auto_fix_enabled: bool = True self._validation_cache: dict[str, tuple[bool, str | None]] = {} - self._fix_stats: dict[str, int] = {"attempted": 0, "successful": 0, "failed": 0} + self._fix_stats = FilterStats() # Use Pydantic model instead of dict! 
def is_tool_enabled(self, tool_name: str) -> bool: """Check if a tool is enabled (not disabled).""" return tool_name not in self.disabled_tools - def disable_tool(self, tool_name: str, reason: str = "user") -> None: - """Disable a tool for a specific reason.""" + def disable_tool( + self, tool_name: str, reason: DisabledReason = DisabledReason.USER + ) -> None: + """Disable a tool for a specific reason - uses enum, no magic strings!""" self.disabled_tools.add(tool_name) - if reason == "validation": + if reason == DisabledReason.VALIDATION: self.disabled_by_validation.add(tool_name) - elif reason == "user": + elif reason == DisabledReason.USER: self.disabled_by_user.add(tool_name) - logger.info(f"Disabled tool '{tool_name}' (reason: {reason})") + logger.info(f"Disabled tool '{tool_name}' (reason: {reason.value})") def enable_tool(self, tool_name: str) -> None: """Re-enable a previously disabled tool.""" @@ -46,19 +95,22 @@ def enable_tool(self, tool_name: str) -> None: logger.info(f"Enabled tool '{tool_name}'") def get_disabled_tools(self) -> dict[str, str]: - """Get all disabled tools with their reasons.""" + """Get all disabled tools with their reasons - uses enum values!""" result = {} for tool in self.disabled_by_validation: - result[tool] = "validation" + result[tool] = DisabledReason.VALIDATION.value for tool in self.disabled_by_user: - result[tool] = "user" + result[tool] = DisabledReason.USER.value return result - def get_disabled_tools_by_reason(self, reason: str) -> set: - """Get disabled tools by specific reason.""" - if reason == "validation": + def get_disabled_tools_by_reason(self, reason: DisabledReason | str) -> set[str]: + """Get disabled tools by specific reason - accepts enum or string!""" + # Support both enum and string for backward compatibility + reason_value = reason.value if isinstance(reason, DisabledReason) else reason + + if reason_value == DisabledReason.VALIDATION.value: return self.disabled_by_validation.copy() - elif reason == "user": 
+ elif reason_value == DisabledReason.USER.value: return self.disabled_by_user.copy() return set() @@ -67,7 +119,7 @@ def clear_validation_disabled(self) -> None: self.disabled_tools -= self.disabled_by_validation self.disabled_by_validation.clear() self._validation_cache.clear() - self._fix_stats = {"attempted": 0, "successful": 0, "failed": 0} + self._fix_stats.reset() # Use model method instead of dict assignment! logger.info("Cleared all validation-disabled tools") def filter_tools( @@ -92,7 +144,7 @@ def filter_tools( { **tool, "_disabled_reason": self.get_disabled_tools().get( - tool_name, "unknown" + tool_name, DisabledReason.UNKNOWN.value ), } ) @@ -101,7 +153,7 @@ def filter_tools( # For OpenAI, use comprehensive validation and fixing if provider == "openai": if self.auto_fix_enabled: - self._fix_stats["attempted"] += 1 + self._fix_stats.increment_attempted() # Use model method! # Use the comprehensive validate_and_fix method is_valid, fixed_tool, error_msg = ( @@ -109,7 +161,7 @@ def filter_tools( ) if is_valid: - self._fix_stats["successful"] += 1 + self._fix_stats.increment_successful() # Use model method! # Check if the tool was actually modified if fixed_tool != tool: @@ -120,18 +172,18 @@ def filter_tools( valid_tools.append(fixed_tool) continue else: - self._fix_stats["failed"] += 1 + self._fix_stats.increment_failed() # Use model method! logger.warning( f"Tool '{tool_name}' failed validation even after auto-fix: {error_msg}" ) - # Disable invalid tool - self.disable_tool(tool_name, "validation") + # Disable invalid tool - use enum! 
+ self.disable_tool(tool_name, DisabledReason.VALIDATION) invalid_tools.append( { **tool, "_validation_error": error_msg, - "_disabled_reason": "validation", + "_disabled_reason": DisabledReason.VALIDATION.value, } ) else: @@ -144,22 +196,24 @@ def filter_tools( logger.warning( f"Tool '{tool_name}' failed validation: {validation.error_message}" ) - self.disable_tool(tool_name, "validation") + self.disable_tool( + tool_name, DisabledReason.VALIDATION + ) # Use enum! invalid_tools.append( { **tool, "_validation_error": validation.error_message, - "_disabled_reason": "validation", + "_disabled_reason": DisabledReason.VALIDATION.value, } ) else: # For other providers, assume valid for now valid_tools.append(tool) - # Log fix statistics - if self._fix_stats["attempted"] > 0: + # Log fix statistics - use model properties! + if self._fix_stats.attempted > 0: logger.info( - f"Auto-fix results: {self._fix_stats['successful']}/{self._fix_stats['attempted']} tools fixed successfully" + f"Auto-fix results: {self._fix_stats.successful}/{self._fix_stats.attempted} tools fixed successfully" ) return valid_tools, invalid_tools @@ -173,24 +227,23 @@ def _extract_tool_name(self, tool: dict[str, Any]) -> str: return tool_name def get_validation_summary(self) -> dict[str, Any]: - """Get a summary of validation results.""" + """Get a summary of validation results - uses model!""" return { "total_disabled": len(self.disabled_tools), "disabled_by_validation": len(self.disabled_by_validation), "disabled_by_user": len(self.disabled_by_user), "auto_fix_enabled": self.auto_fix_enabled, "cache_size": len(self._validation_cache), - "fix_stats": self._fix_stats.copy(), + "fix_stats": self._fix_stats.to_dict(), # Use model method! } def get_fix_statistics(self) -> dict[str, int]: - """Get auto-fix statistics.""" - stats: dict[str, int] = self._fix_stats.copy() - return stats + """Get auto-fix statistics - uses model!""" + return self._fix_stats.to_dict() # Use model method! 
def reset_statistics(self) -> None: - """Reset fix statistics.""" - self._fix_stats = {"attempted": 0, "successful": 0, "failed": 0} + """Reset fix statistics - uses model method!""" + self._fix_stats.reset() # Use model method! def set_auto_fix_enabled(self, enabled: bool) -> None: """Enable or disable auto-fixing.""" diff --git a/src/mcp_cli/tools/manager.py b/src/mcp_cli/tools/manager.py index 3523efb4..70d97d96 100644 --- a/src/mcp_cli/tools/manager.py +++ b/src/mcp_cli/tools/manager.py @@ -1,34 +1,84 @@ # mcp_cli/tools/manager.py """ -Slim ToolManager - orchestrates chuk-tool-processor with mcp-cli features. +ToolManager - orchestrates chuk-tool-processor with mcp-cli features. -Slimmed from 2000+ lines to ~600 lines by: -1. Delegating to StreamManager for all tool operations -2. Keeping only value-add: config parsing, OAuth, filtering, LLM adaptation -3. Removing unused methods and pure pass-through wrappers +Responsibilities: +1. Parse MCP config files and detect server types (HTTP/SSE/STDIO) +2. Integrate with mcp-cli's OAuth TokenManager +3. Filter and validate tools for LLM compatibility +4. Convert between chuk and mcp-cli data models +5. Configure production middleware (retry, circuit breaker, rate limiting) -For direct StreamManager access: tool_manager.stream_manager.method() +For direct tool operations, use the exposed stream_manager property. 
""" from __future__ import annotations -import json +import asyncio import logging -from typing import Any, cast +import os +from collections.abc import AsyncIterator, Awaitable, Callable +from typing import Any from chuk_tool_processor import StreamManager, ToolProcessor - -from mcp_cli.auth import TokenManager, TokenStoreBackend -from mcp_cli.constants import NAMESPACE -from mcp_cli.tools.filter import ToolFilter -from mcp_cli.tools.models import ServerInfo, ToolCallResult, ToolInfo, TransportType +from chuk_tool_processor import ToolCall as CTPToolCall +from chuk_tool_processor import ToolResult as CTPToolResult +from chuk_tool_processor.mcp import MiddlewareConfig + +from mcp_cli.auth import ( + OAuthHandler, + StoredToken, + TokenManager, + TokenStoreBackend, + TokenType, +) +from mcp_cli.config import RuntimeConfig, TimeoutType, load_runtime_config +from mcp_cli.config.defaults import DEFAULT_MIDDLEWARE_ENABLED +from mcp_cli.config import ServerStatus +from mcp_cli.llm.content_models import ContentBlockType +from mcp_cli.tools.config_loader import ConfigLoader +from mcp_cli.tools.dynamic_tools import DynamicToolProvider +from mcp_cli.tools.execution import ( + execute_tools_parallel as _execute_tools_parallel, + stream_execute_tools as _stream_execute_tools, +) +from mcp_cli.tools.filter import DisabledReason, ToolFilter +from mcp_cli.tools.models import ( + ServerInfo, + ToolCallResult, + ToolDefinitionInput, + ToolInfo, + TransportType, +) logger = logging.getLogger(__name__) +# ────────────────────────────────────────────────────────────────────────────── +# OAuth Error Detection Patterns +# ────────────────────────────────────────────────────────────────────────────── +OAUTH_ERROR_PATTERNS = [ + "requires OAuth authorization", + "requires oauth authorization", + "OAuth authorization required", + "oauth authorization required", + "authentication required", + "Authorization required", + "unauthorized", + "401", +] + + +def _is_oauth_error(error_message: 
str) -> bool: + """Check if an error message indicates OAuth authorization is needed.""" + if not error_message: + return False + error_lower = error_message.lower() + return any(pattern.lower() in error_lower for pattern in OAUTH_ERROR_PATTERNS) + class ToolManager: """ - Slim facade over chuk-tool-processor with mcp-cli specific features. + Facade over chuk-tool-processor with mcp-cli specific features. Responsibilities: 1. Parse MCP config files and detect server types (HTTP/SSE/STDIO) @@ -47,59 +97,77 @@ def __init__( tool_timeout: float | None = None, max_concurrency: int = 4, initialization_timeout: float = 120.0, + runtime_config: RuntimeConfig | None = None, + middleware_config: MiddlewareConfig | None = None, + middleware_enabled: bool = DEFAULT_MIDDLEWARE_ENABLED, ): self.config_file = config_file self.servers = servers self.server_names = server_names or {} - self.tool_timeout = tool_timeout or 30.0 + + # Use runtime config for timeout management (type-safe!) + self.runtime_config = runtime_config or load_runtime_config() + + # Tool timeout with priority: param > runtime_config + if tool_timeout is not None: + self.tool_timeout = tool_timeout + else: + self.tool_timeout = self.runtime_config.get_timeout( + TimeoutType.TOOL_EXECUTION + ) + + # Initialization timeout with priority: param > runtime_config + if initialization_timeout != 120.0: # User provided non-default + self.initialization_timeout = initialization_timeout + else: + self.initialization_timeout = self.runtime_config.get_timeout( + TimeoutType.SERVER_INIT + ) + self.max_concurrency = max_concurrency - self.initialization_timeout = initialization_timeout + + # Middleware configuration (retry, circuit breaker, rate limiting) + self._middleware_enabled = middleware_enabled + self._middleware_config = middleware_config + + logger.debug( + f"ToolManager initialized with timeouts: " + f"tool={self.tool_timeout}s, init={self.initialization_timeout}s, " + f"middleware_enabled={middleware_enabled}" + 
) # chuk-tool-processor components (publicly accessible) self.stream_manager: StreamManager | None = None self.processor: ToolProcessor | None = None self._registry = None + # Config loader handles parsing and OAuth + self._config_loader = ConfigLoader(config_file, servers) + # mcp-cli features self.tool_filter = ToolFilter() - self._token_manager: TokenManager - self._config_cache: dict[str, Any] | None = None - - # Server detection results - self._http_servers: list[dict[str, Any]] = [] - self._sse_servers: list[dict[str, Any]] = [] - self._stdio_servers: list[dict[str, Any]] = [] - - # Setup OAuth - self._token_manager = TokenManager( - backend=TokenStoreBackend.AUTO, - namespace=NAMESPACE, - service_name="mcp-cli", - ) + + # Setup dynamic tool provider for on-demand tool discovery + self.dynamic_tool_provider = DynamicToolProvider(self) # ================================================================ # Initialization # ================================================================ async def initialize(self, namespace: str = "stdio") -> bool: - """ - Initialize by parsing config, setting up OAuth, and creating StreamManager. 
- """ + """Initialize by parsing config, setting up OAuth, and creating StreamManager.""" try: from chuk_term.ui import output # Load config and detect server types - config = self._load_config() + config = self._config_loader.load() if not config: output.warning("No config found, initializing with empty toolset") return await self._setup_empty_toolset() - self._detect_server_types(config) + self._config_loader.detect_server_types(config) - # Process OAuth - await self._process_oauth_for_servers(config) - - # Initialize StreamManager based on detected types + # Initialize StreamManager based on detected type success = await self._initialize_stream_manager(namespace) if success and self.stream_manager: @@ -118,39 +186,116 @@ async def initialize(self, namespace: str = "stdio") -> bool: return False async def _initialize_stream_manager(self, namespace: str) -> bool: - """Initialize StreamManager with detected transport type.""" + """Initialize StreamManager with detected transport type. + + ENHANCED: Initializes different transport types in parallel for faster startup. 
+ """ self.stream_manager = StreamManager() + http_servers = self._config_loader.http_servers + sse_servers = self._config_loader.sse_servers + stdio_servers = self._config_loader.stdio_servers + + if not (http_servers or sse_servers or stdio_servers): + logger.info("No servers detected") + return True + try: - # Initialize all server types (not mutually exclusive) - # Changed from elif to if statements to support multiple transport types - if self._http_servers: - logger.info(f"Initializing {len(self._http_servers)} HTTP servers") - await self.stream_manager.initialize_with_http_streamable( - servers=self._http_servers, - server_names=self.server_names, - initialization_timeout=self.initialization_timeout, - oauth_refresh_callback=self._create_oauth_refresh_callback(), + # Build initialization tasks for parallel execution + init_tasks: list[asyncio.Task[None]] = [] + task_names: list[str] = [] + + # Create OAuth callback once (shared by HTTP and SSE) + oauth_callback = self._config_loader.create_oauth_refresh_callback( + http_servers, sse_servers + ) + + if http_servers: + logger.info(f"Preparing {len(http_servers)} HTTP servers for init") + http_dicts = [ + {"name": s.name, "url": s.url, "headers": s.headers or {}} + for s in http_servers + ] + task = asyncio.create_task( + self.stream_manager.initialize_with_http_streamable( + servers=http_dicts, + server_names=self.server_names, + initialization_timeout=self.initialization_timeout, + oauth_refresh_callback=oauth_callback, + ), + name="init_http", ) - if self._sse_servers: - logger.info(f"Initializing {len(self._sse_servers)} SSE servers") - await self.stream_manager.initialize_with_sse( - servers=self._sse_servers, - server_names=self.server_names, - initialization_timeout=self.initialization_timeout, - oauth_refresh_callback=self._create_oauth_refresh_callback(), + init_tasks.append(task) + task_names.append("HTTP") + + if sse_servers: + logger.info(f"Preparing {len(sse_servers)} SSE servers for init") + 
sse_dicts = [ + {"name": s.name, "url": s.url, "headers": s.headers or {}} + for s in sse_servers + ] + task = asyncio.create_task( + self.stream_manager.initialize_with_sse( + servers=sse_dicts, + server_names=self.server_names, + initialization_timeout=self.initialization_timeout, + oauth_refresh_callback=oauth_callback, + ), + name="init_sse", ) - if self._stdio_servers: - logger.info(f"Initializing {len(self._stdio_servers)} STDIO servers") - await self.stream_manager.initialize_with_stdio( - servers=self._stdio_servers, - server_names=self.server_names, - initialization_timeout=self.initialization_timeout, + init_tasks.append(task) + task_names.append("SSE") + + if stdio_servers: + logger.info(f"Preparing {len(stdio_servers)} STDIO servers for init") + stdio_dicts = [ + { + "name": s.name, + "command": s.command, + "args": s.args, + "env": s.env, + } + for s in stdio_servers + ] + task = asyncio.create_task( + self.stream_manager.initialize_with_stdio( + servers=stdio_dicts, + server_names=self.server_names, + initialization_timeout=self.initialization_timeout, + ), + name="init_stdio", ) + init_tasks.append(task) + task_names.append("STDIO") - if not (self._http_servers or self._sse_servers or self._stdio_servers): - logger.info("No servers detected") - return True + # Run all transport initializations in parallel + if init_tasks: + logger.info( + f"Starting parallel initialization of {len(init_tasks)} transport types: {', '.join(task_names)}" + ) + results = await asyncio.gather(*init_tasks, return_exceptions=True) + + # Check for errors + errors = [] + for name, result in zip(task_names, results): + if isinstance(result, Exception): + errors.append(f"{name}: {result}") + logger.error(f"{name} initialization failed: {result}") + + if errors: + # Log errors but don't fail if at least one transport succeeded + logger.warning( + f"Some transports failed to initialize: {'; '.join(errors)}" + ) + + logger.info("Parallel server initialization complete") + + # 
Enable middleware if configured (retry, circuit breaker, rate limiting) + if self._middleware_enabled and self.stream_manager: + self.stream_manager.enable_middleware(self._middleware_config) + logger.info( + "CTP middleware enabled (retry, circuit breaker, rate limiting)" + ) return True @@ -174,112 +319,6 @@ async def close(self) -> None: except Exception as e: logger.warning(f"Error closing stream_manager: {e}") - # ================================================================ - # Config Parsing - # ================================================================ - - def _load_config(self) -> dict[str, Any]: - """Load and parse MCP config file with token resolution.""" - if self._config_cache: - return self._config_cache - - try: - with open(self.config_file) as f: - config = cast(dict[str, Any], json.load(f)) - - # Resolve {{token:provider}} placeholders - self._resolve_token_placeholders(config) - - self._config_cache = config - return config - - except FileNotFoundError: - logger.warning(f"Config file not found: {self.config_file}") - return {} - except json.JSONDecodeError as e: - logger.error(f"Invalid JSON in config: {e}") - return {} - except Exception as e: - logger.error(f"Error loading config: {e}") - return {} - - def _resolve_token_placeholders(self, config: dict[str, Any]) -> None: - """Replace {{token:provider}} with actual OAuth tokens.""" - - # Recursive function to process nested dicts - def process_value(value: Any) -> Any: - if isinstance(value, str) and value.startswith("{{token:"): - # Extract provider name - provider = value[8:-2] # Remove {{token: and }} - try: - token = self._token_manager.get_token(provider) - if token: - return f"Bearer {token.access_token}" - except Exception as e: - logger.warning(f"Failed to get token for {provider}: {e}") - elif isinstance(value, dict): - return {k: process_value(v) for k, v in value.items()} - elif isinstance(value, list): - return [process_value(item) for item in value] - return value - - # 
Process entire config - if "mcpServers" in config: - config["mcpServers"] = process_value(config["mcpServers"]) - - def _detect_server_types(self, config: dict[str, Any]) -> None: - """Detect HTTP/SSE/STDIO servers from config.""" - mcp_servers = config.get("mcpServers", {}) - - for server_name in self.servers: - if server_name not in mcp_servers: - logger.warning(f"Server '{server_name}' not found in config") - continue - - server_cfg = mcp_servers[server_name] - - # Detect transport type - if "url" in server_cfg: - transport = server_cfg.get("transport", "").lower() - server_config = { - "name": server_name, - "url": server_cfg["url"], - "headers": server_cfg.get("headers", {}), - } - - if "sse" in transport: - self._sse_servers.append(server_config) - else: - self._http_servers.append(server_config) - else: - # STDIO server - self._stdio_servers.append( - { - "name": server_name, - "command": server_cfg.get("command"), - "args": server_cfg.get("args", []), - "env": server_cfg.get("env", {}), - } - ) - - # ================================================================ - # OAuth Integration - # ================================================================ - - async def _process_oauth_for_servers(self, config: dict[str, Any]) -> None: - """Pre-fetch OAuth tokens for servers that need them.""" - # This is a simplified version - full OAuth logic can be added if needed - pass - - def _create_oauth_refresh_callback(self): - """Create OAuth token refresh callback for StreamManager.""" - - async def refresh_oauth_token(): - # Simplified - can be expanded with actual refresh logic - logger.debug("OAuth refresh callback triggered") - - return refresh_oauth_token - # ================================================================ # Tool Access (with ToolInfo conversion) # ================================================================ @@ -300,7 +339,6 @@ async def get_all_tools(self) -> list[ToolInfo]: logger.debug(f"Failed to get metadata for {name}: {e}") metadata 
= None - # Create ToolInfo even if metadata is missing tools.append( ToolInfo( name=name, @@ -320,11 +358,26 @@ async def get_all_tools(self) -> list[ToolInfo]: return [] try: - # Get tools from StreamManager tools_dict = self.stream_manager.get_all_tools() - - # Convert to ToolInfo - return [self._convert_to_tool_info(t) for t in tools_dict] + # Get tool→server mapping for correct namespace + tool_to_server = getattr(self.stream_manager, "tool_to_server_map", {}) + tools = [] + for t in tools_dict: + tool_info = self._convert_to_tool_info(t) + # Override namespace with server name from tool_to_server_map + server_name = tool_to_server.get(tool_info.name) + if server_name: + tool_info = ToolInfo( + name=tool_info.name, + namespace=server_name, + description=tool_info.description, + parameters=tool_info.parameters, + is_async=tool_info.is_async, + tags=tool_info.tags, + supports_streaming=tool_info.supports_streaming, + ) + tools.append(tool_info) + return tools except Exception as e: logger.error(f"Error getting tools: {e}") return [] @@ -348,12 +401,10 @@ async def get_tool_by_name( all_tools = await self.get_all_tools() if namespace: - # Filter by namespace first for tool in all_tools: if tool.name == tool_name and tool.namespace == namespace: return tool else: - # Return first match for tool in all_tools: if tool.name == tool_name: return tool @@ -362,55 +413,197 @@ async def get_tool_by_name( @staticmethod def format_tool_response(response: Any) -> str: - """ - Format a tool response for display. - - Handles MCP text records, JSON data, dicts, and scalars. 
- """ + """Format a tool response for display.""" import json # Handle list of text records (MCP format) if isinstance(response, list): - # Check if it's a list of text records + from mcp_cli.llm.content_models import TextContent + + try: + text_blocks = [ + TextContent.model_validate(item) + for item in response + if isinstance(item, dict) + and item.get("type") == ContentBlockType.TEXT.value + ] + if text_blocks: + return "\n".join(block.text for block in text_blocks) + except Exception: + pass + if all( - isinstance(item, dict) and item.get("type") == "text" + isinstance(item, dict) + and item.get("type") == ContentBlockType.TEXT.value for item in response ): return "\n".join(item.get("text", "") for item in response) - # Otherwise serialize as JSON return json.dumps(response, indent=2) - # Handle dict if isinstance(response, dict): return json.dumps(response, indent=2) - # Handle scalar values return str(response) def _convert_to_tool_info(self, tool_dict: dict[str, Any]) -> ToolInfo: """Convert chuk tool dict to mcp-cli ToolInfo.""" + tool_input = ToolDefinitionInput.model_validate(tool_dict) + return ToolInfo( - name=tool_dict.get("name", ""), - namespace=tool_dict.get("namespace", "default"), - description=tool_dict.get("description"), - parameters=tool_dict.get("inputSchema", {}), - is_async=tool_dict.get("is_async", False), - tags=tool_dict.get("tags", []), + name=tool_input.name, + namespace=tool_input.namespace, + description=tool_input.description, + parameters=tool_input.inputSchema, + is_async=tool_input.is_async, + tags=tool_input.tags, ) # ================================================================ - # Tool Execution (wraps StreamManager) + # Tool Execution # ================================================================ + def _get_server_url(self, server_name: str) -> str | None: + """Get the URL for an HTTP/SSE server by name. 
+ + Args: + server_name: Name of the server + + Returns: + Server URL or None if not found or not an HTTP server + """ + # Check HTTP servers + for server in self._config_loader.http_servers: + if server.name == server_name: + return server.url + + # Check SSE servers + for server in self._config_loader.sse_servers: + if server.name == server_name: + return server.url + + return None + + async def _handle_oauth_flow(self, server_name: str, server_url: str) -> bool: + """Handle OAuth authentication flow for a server. + + Args: + server_name: Name of the server requiring OAuth + server_url: URL of the server + + Returns: + True if OAuth completed successfully, False otherwise + """ + try: + from chuk_term.ui import output + + output.info(f"🔐 OAuth authorization required for server: {server_name}") + output.info("Opening browser for authentication...") + + # Create token manager with mcp-cli service name + token_manager = TokenManager( + backend=TokenStoreBackend.AUTO, + service_name="mcp-cli", + ) + oauth_handler = OAuthHandler(token_manager=token_manager) + + # Clear any existing tokens - they're clearly invalid since the server + # returned an OAuth error. This forces a fresh browser-based auth flow. 
+ oauth_handler.clear_tokens(server_name) + logger.debug(f"Cleared existing tokens for {server_name} to force re-auth") + + # Perform MCP OAuth flow (discovers metadata, opens browser, gets tokens) + tokens = await oauth_handler.ensure_authenticated_mcp( + server_name=server_name, + server_url=server_url, + ) + + if tokens and tokens.access_token: + # Also store in the format expected by the oauth_refresh_callback + # The refresh callback looks for "oauth:{server_name}" with StoredToken format + import json + + stored = StoredToken( + token_type=TokenType.OAUTH, + name=server_name, + data={ + "access_token": tokens.access_token, + "refresh_token": tokens.refresh_token, + "token_type": tokens.token_type, + "expires_in": tokens.expires_in, + "issued_at": tokens.issued_at, + }, + ) + token_manager.token_store._store_raw( + f"oauth:{server_name}", json.dumps(stored.model_dump()) + ) + logger.debug( + f"Stored OAuth token for refresh callback: oauth:{server_name}" + ) + + # Update the transport's headers so the retry uses the new token + if self.stream_manager and hasattr(self.stream_manager, "transports"): + transport = self.stream_manager.transports.get(server_name) + if transport and hasattr(transport, "configured_headers"): + transport.configured_headers["Authorization"] = ( + f"Bearer {tokens.access_token}" + ) + logger.debug(f"Updated transport headers for {server_name}") + + output.success(f"✅ Successfully authenticated with {server_name}") + logger.info(f"OAuth flow completed for {server_name}") + return True + else: + output.error( + f"❌ OAuth flow did not return valid tokens for {server_name}" + ) + return False + + except Exception as e: + logger.error(f"OAuth flow failed for {server_name}: {e}", exc_info=True) + try: + from chuk_term.ui import output + + output.error(f"❌ OAuth authentication failed: {e}") + except ImportError: + pass + return False + async def execute_tool( self, tool_name: str, arguments: dict[str, Any], namespace: str | None = None, 
timeout: float | None = None, + _oauth_retry: bool = False, ) -> ToolCallResult: - """Execute tool and return ToolCallResult with automatic recovery on transport errors.""" + """Execute tool and return ToolCallResult. + + When middleware is enabled (default), CTP handles: + - Retry with exponential backoff for transient errors + - Circuit breaker pattern for failing servers + - Rate limiting (if configured) + + OAuth handling: + - If a tool fails with OAuth authorization error, automatically + triggers the OAuth flow and retries the tool call once. + """ + # Check if this is a dynamic tool + if self.dynamic_tool_provider.is_dynamic_tool(tool_name): + logger.info(f"Executing dynamic tool: {tool_name}") + try: + result = await self.dynamic_tool_provider.execute_dynamic_tool( + tool_name, arguments + ) + return ToolCallResult(tool_name=tool_name, success=True, result=result) + except Exception as e: + error_msg = str(e) + logger.error(f"Dynamic tool execution failed: {error_msg}") + return ToolCallResult( + tool_name=tool_name, success=False, error=error_msg + ) + + # Regular MCP tool execution (middleware handles retries if enabled) if not self.stream_manager: return ToolCallResult( tool_name=tool_name, success=False, error="ToolManager not initialized" @@ -423,25 +616,130 @@ async def execute_tool( server_name=namespace, timeout=timeout or self.tool_timeout, ) + + # Check if result contains an OAuth error (some servers return errors in content) + result_str = str(result) if result else "" + if _is_oauth_error(result_str) and not _oauth_retry: + logger.info(f"OAuth error detected in tool result for {tool_name}") + # Determine server name - use namespace or look up from tool + server_name = namespace or await self.get_server_for_tool(tool_name) + if server_name: + server_url = self._get_server_url(server_name) + if server_url: + if await self._handle_oauth_flow(server_name, server_url): + # Retry the tool call once after OAuth + logger.info(f"Retrying tool {tool_name} 
after OAuth") + return await self.execute_tool( + tool_name, + arguments, + namespace, + timeout, + _oauth_retry=True, + ) + return ToolCallResult(tool_name=tool_name, success=True, result=result) except Exception as e: error_msg = str(e) logger.error(f"Tool execution failed: {error_msg}") - # Check if this is a transport error that might be recoverable - if "Transport not initialized" in error_msg or "transport" in error_msg.lower(): - logger.warning(f"Transport error detected for tool {tool_name}, attempting recovery...") - - # Attempt to recover by reconnecting to the affected server - recovery_result = await self._attempt_transport_recovery( - tool_name, arguments, namespace, timeout + # Check if this is an OAuth error and we haven't already retried + if _is_oauth_error(error_msg) and not _oauth_retry: + logger.info( + f"OAuth error detected for tool {tool_name}, attempting authentication" ) - if recovery_result: - return recovery_result + + # Determine server name - use namespace or look up from tool + server_name = namespace or await self.get_server_for_tool(tool_name) + if server_name: + server_url = self._get_server_url(server_name) + if server_url: + if await self._handle_oauth_flow(server_name, server_url): + # Retry the tool call once after OAuth + logger.info(f"Retrying tool {tool_name} after OAuth") + return await self.execute_tool( + tool_name, + arguments, + namespace, + timeout, + _oauth_retry=True, + ) + else: + return ToolCallResult( + tool_name=tool_name, + success=False, + error=f"OAuth authentication failed for {server_name}. 
{error_msg}", + ) + else: + logger.warning(f"Could not find URL for server {server_name}") + else: + logger.warning(f"Could not determine server for tool {tool_name}") return ToolCallResult(tool_name=tool_name, success=False, error=error_msg) + async def stream_execute_tool( + self, + tool_name: str, + arguments: dict[str, Any], + namespace: str | None = None, + timeout: float | None = None, + ): + """Stream tool execution results (for tools that support streaming).""" + result = await self.execute_tool(tool_name, arguments, namespace, timeout) + yield result + + async def execute_tools_parallel( + self, + calls: list[CTPToolCall], + timeout: float | None = None, + on_tool_start: Callable[[CTPToolCall], Awaitable[None]] | None = None, + on_tool_result: Callable[[CTPToolResult], Awaitable[None]] | None = None, + max_concurrency: int = 4, + ) -> list[CTPToolResult]: + """Execute multiple tool calls in parallel with optional callbacks.""" + return await _execute_tools_parallel( + self, calls, timeout, on_tool_start, on_tool_result, max_concurrency + ) + + async def stream_execute_tools( + self, + calls: list[CTPToolCall], + timeout: float | None = None, + on_tool_start: Callable[[CTPToolCall], Awaitable[None]] | None = None, + max_concurrency: int = 4, + ) -> AsyncIterator[CTPToolResult]: + """Execute multiple tool calls in parallel, yielding results as they complete.""" + async for result in _stream_execute_tools( + self, calls, timeout, on_tool_start, max_concurrency + ): + yield result + + # ================================================================ + # Middleware Status + # ================================================================ + + def get_middleware_status(self) -> dict[str, Any] | None: + """Get middleware status for diagnostics. + + Returns Pydantic model dict with retry, circuit breaker, and rate limiting status. 
+ """ + if not self.stream_manager: + return None + + try: + status = self.stream_manager.get_middleware_status() + return status.model_dump() if status else None + except Exception as e: + logger.error(f"Error getting middleware status: {e}") + return None + + @property + def middleware_enabled(self) -> bool: + """Check if middleware is currently enabled.""" + if not self.stream_manager: + return False + return bool(self.stream_manager.middleware_enabled) + # ================================================================ # LLM Integration (filtering + adaptation) # ================================================================ @@ -449,11 +747,22 @@ async def execute_tool( async def get_tools_for_llm(self, provider: str = "openai") -> list[dict[str, Any]]: """Get tools filtered and validated for LLM.""" try: - # Get all tools first (handles both stream_manager and registry paths) + # Check if dynamic tools mode is enabled + dynamic_mode = os.environ.get("MCP_CLI_DYNAMIC_TOOLS") == "1" + + if dynamic_mode: + dynamic_tools: list[dict[str, Any]] = ( + self.dynamic_tool_provider.get_dynamic_tools() + ) + logger.info( + f"Dynamic tools mode: Returning {len(dynamic_tools)} dynamic tools only" + ) + return dynamic_tools + + # Static mode: load all tools upfront all_tools = await self.get_all_tools() - # Convert ToolInfo to LLM format for filter - raw_tools = [ + raw_tools: list[dict[str, Any]] = [ { "type": "function", "function": { @@ -466,9 +775,42 @@ async def get_tools_for_llm(self, provider: str = "openai") -> list[dict[str, An for t in all_tools ] + # Apply include/exclude filtering from environment variables + include_tools = os.environ.get("MCP_CLI_INCLUDE_TOOLS") + exclude_tools = os.environ.get("MCP_CLI_EXCLUDE_TOOLS") + + if include_tools: + include_set = {name.strip() for name in include_tools.split(",")} + raw_tools = [ + tool + for tool in raw_tools + if tool["function"]["name"] in include_set + ] + logger.info( + f"Filtered to {len(raw_tools)} tools 
using include list: {include_set}" + ) + + if exclude_tools: + exclude_set = {name.strip() for name in exclude_tools.split(",")} + raw_tools = [ + tool + for tool in raw_tools + if tool["function"]["name"] not in exclude_set + ] + logger.info( + f"Filtered to {len(raw_tools)} tools using exclude list: {exclude_set}" + ) + # Filter and validate for provider valid_tools, _ = self.tool_filter.filter_tools(raw_tools, provider=provider) + logger.info( + f"Returning {len(valid_tools)} tools for LLM after all filtering" + ) + if len(valid_tools) <= 10: + for tool in valid_tools: + logger.info(f" - {tool['function']['name']}") + return valid_tools except Exception as e: @@ -483,7 +825,6 @@ async def get_adapted_tools_for_llm( """Get tools adapted for LLM with name mapping.""" tools = await self.get_tools_for_llm(provider) - # Create identity mapping if not provided if name_mapping is None: mapping = { tool["function"]["name"]: tool["function"]["name"] for tool in tools @@ -497,7 +838,9 @@ async def get_adapted_tools_for_llm( # Tool Filtering API (delegates to ToolFilter) # ================================================================ - def disable_tool(self, tool_name: str, reason: str = "user") -> None: + def disable_tool( + self, tool_name: str, reason: DisabledReason = DisabledReason.USER + ) -> None: """Disable a tool.""" self.tool_filter.disable_tool(tool_name, reason) @@ -539,7 +882,6 @@ async def validate_single_tool( if not tool: return False, f"Tool '{tool_name}' not found" - # Convert to dict for validation tool_dict = { "name": tool.name, "description": tool.description, @@ -558,71 +900,6 @@ async def validate_single_tool( except Exception as e: return False, str(e) - async def _attempt_transport_recovery( - self, - tool_name: str, - arguments: dict[str, Any], - namespace: str | None = None, - timeout: float | None = None, - ) -> ToolCallResult | None: - """ - Attempt to recover from transport errors by reconnecting to the server. 
- - This handles cases where the MCP server transport gets into a bad state - after timeouts or concurrent requests. - - Returns: - ToolCallResult if recovery succeeded and tool was executed, None otherwise - """ - try: - # First, try to identify which server this tool belongs to - server_name = namespace - if not server_name: - # Try to find the server by looking at available tools - tools = await self.get_all_tools() - for tool in tools: - if tool.name == tool_name: - server_name = tool.server_name - break - - if not server_name: - logger.warning(f"Could not identify server for tool {tool_name}") - return None - - logger.info(f"Attempting to reconnect to server '{server_name}' for tool '{tool_name}'") - - # Try to reconnect the specific server through StreamManager - if hasattr(self.stream_manager, 'reconnect_server'): - await self.stream_manager.reconnect_server(server_name) - elif hasattr(self.stream_manager, 'restart_server'): - await self.stream_manager.restart_server(server_name) - else: - # If no specific reconnect method, log warning - logger.warning( - f"StreamManager doesn't support reconnection - server {server_name} may remain in bad state" - ) - return None - - # Wait a moment for reconnection - import asyncio - await asyncio.sleep(0.5) - - # Retry the tool call once - logger.info(f"Retrying tool {tool_name} after transport recovery") - result = await self.stream_manager.call_tool( - tool_name=tool_name, - arguments=arguments, - server_name=namespace, - timeout=timeout or self.tool_timeout, - ) - - logger.info(f"Tool {tool_name} succeeded after recovery") - return ToolCallResult(tool_name=tool_name, success=True, result=result) - - except Exception as recovery_error: - logger.error(f"Transport recovery failed: {recovery_error}") - return None - async def revalidate_tools(self, provider: str = "openai") -> dict[str, Any]: """Revalidate all tools and return summary.""" try: @@ -653,53 +930,77 @@ async def revalidate_tools(self, provider: str = "openai") 
-> dict[str, Any]: def get_tool_validation_details(self, tool_name: str) -> dict[str, Any] | None: """Get validation details for a specific tool.""" - # For now, return basic info - can be expanded if validation cache is needed return {"name": tool_name, "status": "unknown"} + # ================================================================ + # Server Info + # ================================================================ + async def get_server_info(self) -> list[ServerInfo]: """Get information about connected servers.""" if not self.stream_manager: return [] try: - # Construct ServerInfo from detected servers servers = [] server_id = 0 - all_servers = self._http_servers + self._sse_servers + self._stdio_servers + http_servers = self._config_loader.http_servers + sse_servers = self._config_loader.sse_servers + stdio_servers = self._config_loader.stdio_servers + all_servers = http_servers + sse_servers + stdio_servers - # Get tool counts per server if available - tools = await self.get_all_tools() + # Get tool counts per server from StreamManager's tool_to_server_map + # This is the authoritative source for tool→server mapping tool_counts: dict[str, int] = {} - for tool in tools: - namespace = tool.namespace or "default" - tool_counts[namespace] = tool_counts.get(namespace, 0) + 1 + if hasattr(self.stream_manager, "tool_to_server_map"): + for server_name in self.stream_manager.tool_to_server_map.values(): + tool_counts[server_name] = tool_counts.get(server_name, 0) + 1 for server in all_servers: - server_name = server.get("name", "unknown") + server_name = server.name + + # Determine transport type and get transport-specific fields + from mcp_cli.config.server_models import ( + HTTPServerConfig, + STDIOServerConfig, + ) + + command: str | None = None + url: str | None = None + args: list[str] = [] + env: dict[str, str] = {} - # Determine transport type - if server in self._http_servers: + if server in http_servers: transport = TransportType.HTTP - elif server in 
self._sse_servers: + if isinstance(server, HTTPServerConfig): + url = server.url + elif server in sse_servers: transport = TransportType.SSE + if isinstance(server, HTTPServerConfig): + url = server.url else: transport = TransportType.STDIO + if isinstance(server, STDIOServerConfig): + command = server.command + args = list(server.args) + env = dict(server.env) servers.append( ServerInfo( id=server_id, name=server_name, - status="connected", + status=ServerStatus.CONNECTED.value, tool_count=tool_counts.get(server_name, 0), namespace=server_name, - enabled=True, + enabled=not server.disabled, connected=True, transport=transport, capabilities={}, - command=server.get("command"), - args=server.get("args", []), - env=server.get("env", {}), + command=command, + url=url, + args=args, + env=env, ) ) server_id += 1 @@ -724,19 +1025,6 @@ async def get_server_for_tool(self, tool_name: str) -> str | None: logger.error(f"Error getting server for tool: {e}") return None - async def stream_execute_tool( - self, - tool_name: str, - arguments: dict[str, Any], - namespace: str | None = None, - timeout: float | None = None, - ): - """Stream tool execution results (for tools that support streaming).""" - # For now, fall back to regular execution and yield once - # Can be enhanced when StreamManager supports streaming - result = await self.execute_tool(tool_name, arguments, namespace, timeout) - yield result - def get_streams(self): """Get active streams from StreamManager.""" if not self.stream_manager: @@ -746,7 +1034,6 @@ def get_streams(self): if hasattr(self.stream_manager, "get_streams"): return self.stream_manager.get_streams() return [] - except Exception as e: logger.error(f"Error getting streams: {e}") return [] @@ -760,7 +1047,6 @@ def list_resources(self): if hasattr(self.stream_manager, "list_resources"): return self.stream_manager.list_resources() return [] - except Exception as e: logger.error(f"Error listing resources: {e}") return [] @@ -774,7 +1060,6 @@ def 
list_prompts(self): if hasattr(self.stream_manager, "list_prompts"): return self.stream_manager.list_prompts() return [] - except Exception as e: logger.error(f"Error listing prompts: {e}") return [] diff --git a/src/mcp_cli/tools/models.py b/src/mcp_cli/tools/models.py index 0d8afd5c..84b40a35 100644 --- a/src/mcp_cli/tools/models.py +++ b/src/mcp_cli/tools/models.py @@ -8,6 +8,8 @@ from pydantic import BaseModel, Field +from mcp_cli.llm.content_models import ContentBlockType + # ────────────────────────────────────────────────────────────────────────────── # Constants and Enums @@ -136,7 +138,8 @@ class ServerInfo(BaseModel): capabilities: dict[str, Any] = Field(default_factory=dict) description: str | None = None # From server metadata version: str | None = None # Server version - command: str | None = None # Server command if known + command: str | None = None # Server command if known (for stdio) + url: str | None = None # Server URL (for http/sse) args: list[str] = Field(default_factory=list) # Command arguments env: dict[str, str] = Field(default_factory=dict) # Environment variables @@ -251,17 +254,20 @@ def _extract_mcp_text_content(self, result: Any) -> str | None: """Extract text content from MCP SDK ToolResult structure.""" if isinstance(result, dict): # Check for MCP response structure: {'isError': bool, 'content': ToolResult} - if 'content' in result and hasattr(result['content'], 'content'): + if "content" in result and hasattr(result["content"], "content"): # Extract content array from MCP ToolResult - tool_result_content = result['content'].content + tool_result_content = result["content"].content if isinstance(tool_result_content, list): - # Extract text from content blocks + # Extract text from content blocks - use enum, no magic strings! 
text_parts = [] for block in tool_result_content: - if isinstance(block, dict) and block.get('type') == 'text': - text_parts.append(block.get('text', '')) + if ( + isinstance(block, dict) + and block.get("type") == ContentBlockType.TEXT.value + ): + text_parts.append(block.get("text", "")) if text_parts: - return '\n'.join(text_parts) + return "\n".join(text_parts) return None @property @@ -543,3 +549,33 @@ class LLMToolDefinition(BaseModel): def to_dict(self) -> dict[str, Any]: """Convert to dictionary format for LLM API calls.""" return self.model_dump(mode="json") # type: ignore[no-any-return] + + +# ────────────────────────────────────────────────────────────────────────────── +# Tool Definition Input Models (for parsing raw tool dicts) +# ────────────────────────────────────────────────────────────────────────────── +class ToolInputSchema(BaseModel): + """Input schema for tool parameters (JSON Schema format).""" + + type: str = "object" + properties: dict[str, Any] = Field(default_factory=dict) + required: list[str] = Field(default_factory=list) + additionalProperties: bool = False + + model_config = {"frozen": True, "extra": "allow"} + + +class ToolDefinitionInput(BaseModel): + """Input model for parsing tool definitions from dicts. + + Used to convert raw tool dicts from chuk_tool_processor into ToolInfo models. + """ + + name: str + namespace: str = "default" + description: str | None = None + inputSchema: dict[str, Any] = Field(default_factory=dict) + is_async: bool = False + tags: list[str] = Field(default_factory=list) + + model_config = {"frozen": False, "extra": "ignore"} diff --git a/src/mcp_cli/tools/validation.py b/src/mcp_cli/tools/validation.py index 0f85ff1c..6de945e4 100644 --- a/src/mcp_cli/tools/validation.py +++ b/src/mcp_cli/tools/validation.py @@ -1,11 +1,10 @@ -# mcp_cli/tools/validation.py -""" -from __future__ import annotations +"""mcp_cli.tools.validation - Tool schema validation and filtering system. 
-Tool schema validation and filtering system. SIMPLIFIED: Focus on auto-fixing rather than strict validation. """ +from __future__ import annotations + import json import logging from typing import Any, cast diff --git a/src/mcp_cli/ui/__init__.py b/src/mcp_cli/ui/__init__.py deleted file mode 100644 index d8875da3..00000000 --- a/src/mcp_cli/ui/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# src/mcp_cli/ui/__init__.py diff --git a/src/mcp_cli/ui/chat_display_manager.py b/src/mcp_cli/ui/chat_display_manager.py deleted file mode 100644 index c034702a..00000000 --- a/src/mcp_cli/ui/chat_display_manager.py +++ /dev/null @@ -1,245 +0,0 @@ -# src/mcp_cli/ui/chat_display_manager.py -""" -from __future__ import annotations - -Centralized Chat Display Manager for MCP-CLI. - -This module consolidates ALL UI display logic for chat mode into a single -coherent system that prevents conflicts and ensures consistent behavior. - -Replaces scattered UI logic from: -- ui_manager.py (partial) -- tool_processor.py (display parts) -- streaming_handler.py (display parts) -- formatting.py (tool formatting) -- unified_display.py (abandoned approach) -""" - -import time -import json -from typing import Any - -from chuk_term.ui import output -from chuk_term.ui.terminal import clear_line -from mcp_cli.chat.models import ToolExecutionState - - -class ChatDisplayManager: - """Centralized display manager for all chat UI operations.""" - - def __init__(self, console=None) -> None: - # console parameter kept for compatibility but not used - # since we're using chuk-term instead of Rich - - # Display state - self.is_streaming = False - self.streaming_content = "" - self.streaming_start_time = 0.0 - - self.is_tool_executing = False - self.current_tool: ToolExecutionState | None = None - self.tool_start_time = 0.0 - - # Spinner animation - self.spinner_frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"] - self.spinner_index = 0 - - # Track if we're showing live content - self.live_display_active 
= False - self.last_status_line = "" - - # ==================== STREAMING METHODS ==================== - - def start_streaming(self): - """Start streaming response display.""" - self.is_streaming = True - self.streaming_content = "" - self.streaming_start_time = time.time() - self._ensure_live_display() - - def update_streaming(self, content: str): - """Update streaming content.""" - if self.is_streaming: - self.streaming_content += content - self._refresh_display() - - def finish_streaming(self): - """Finish streaming and show final response.""" - if not self.is_streaming: - return - - self.is_streaming = False - self._stop_live_display() - - # Show final response - if self.streaming_content: - elapsed = time.time() - self.streaming_start_time - self._show_final_response(self.streaming_content, elapsed) - - # ==================== TOOL EXECUTION METHODS ==================== - - def start_tool_execution(self, tool_name: str, arguments: dict[str, Any]): - """Start animated tool execution display.""" - self.is_tool_executing = True - self.current_tool = ToolExecutionState( - name=tool_name, arguments=arguments, start_time=time.time() - ) - - # Start animated tool execution - self._ensure_live_display() - - def finish_tool_execution(self, result: str, success: bool = True): - """Finish tool execution and show final result.""" - if not self.is_tool_executing or not self.current_tool: - return - - # Store result for final display - elapsed_time = time.time() - self.current_tool.start_time - self.current_tool.result = result - self.current_tool.success = success - self.current_tool.elapsed = elapsed_time - self.current_tool.completed = True - - self.is_tool_executing = False - self._stop_live_display() - - # Show final tool result - self._show_final_tool_result() - self.current_tool = None - - # ==================== USER MESSAGE METHODS ==================== - - def show_user_message(self, message: str): - """Show user message.""" - # Display user message with a clear 
format - output.print(f"\n👤 User: {message}") - - def show_assistant_message(self, content: str, elapsed: float): - """Show assistant message (non-streaming).""" - # Display assistant message with timing - output.print(f"\n🤖 Assistant ({elapsed:.2f}s):") - output.print(content) - - # ==================== PRIVATE METHODS ==================== - - def _ensure_live_display(self): - """Ensure live display is active.""" - if not self.live_display_active: - self.live_display_active = True - self._refresh_display() - - def _stop_live_display(self): - """Stop live display.""" - if self.live_display_active: - # Clear the current status line - if self.last_status_line: - clear_line() - self.live_display_active = False - self.last_status_line = "" - - def _refresh_display(self): - """Refresh live display content.""" - if self.live_display_active: - # Clear previous line and show new status - if self.last_status_line: - # Move cursor up and clear line - print("\r", end="") - clear_line() - - status = self._create_live_status() - if status: - print(f"\r{status}", end="", flush=True) - self.last_status_line = status - - def _create_live_status(self) -> str: - """Create live display status line.""" - self.spinner_index = (self.spinner_index + 1) % len(self.spinner_frames) - spinner = self.spinner_frames[self.spinner_index] - - # Assistant streaming section - if self.is_streaming: - elapsed = time.time() - self.streaming_start_time - char_count = len(self.streaming_content) - status = f"{spinner} Generating response... {char_count:,} chars • {elapsed:.1f}s" - return status - - # Tool execution section - elif self.is_tool_executing and self.current_tool: - elapsed = time.time() - self.current_tool.start_time - dots = "." 
* (int(elapsed * 2) % 4) - status = ( - f"{spinner} Executing {self.current_tool.name}{dots} ({elapsed:.1f}s)" - ) - return status - - return "" - - def _show_final_response(self, content: str, elapsed: float): - """Show final response.""" - # Display final response with timing - output.print(f"\n🤖 Assistant ({elapsed:.2f}s):") - output.print(content) - - def _show_final_tool_result(self): - """Show final tool execution result.""" - if not self.current_tool: - return - - tool_info = self.current_tool - - # Status header - if tool_info.success: - output.success(f"✓ Completed: {tool_info.name} ({tool_info.elapsed:.2f}s)") - else: - output.error(f"✗ Failed: {tool_info.name} ({tool_info.elapsed:.2f}s)") - - # Arguments (compact) - args = tool_info.arguments - if args and any(str(v).strip() for v in args.values() if v is not None): - output.print("Arguments:") - filtered_args = { - k: v for k, v in args.items() if v is not None and str(v).strip() - } - for key, value in filtered_args.items(): - output.print(f" {key}: {value}") - - # Result - result = tool_info.result - if result: - output.print("Result:") - # Try to format result nicely - try: - # Try to parse as JSON for better formatting - parsed = json.loads(result) - formatted_result = json.dumps(parsed, indent=2) - output.code(formatted_result, language="json") - except (json.JSONDecodeError, TypeError): - # Use as plain text - output.print(str(result)) - - def _show_tool_invocation(self, tool_name: str, arguments: dict[str, Any]): - """Show tool invocation.""" - output.tool_call(tool_name, arguments) - - def _show_tool_result( - self, tool_info: dict[str, Any], result: str, elapsed: float, success: bool - ): - """Show tool execution result.""" - # Tool name and status - if success: - output.success(f"✓ Completed: {tool_info['name']} ({elapsed:.2f}s)") - else: - output.error(f"✗ Failed: {tool_info['name']} ({elapsed:.2f}s)") - - # Result - if result: - output.print("Result:") - # Try to format result nicely - try: 
- # Try to parse as JSON for better formatting - parsed = json.loads(result) - formatted_result = json.dumps(parsed, indent=2) - output.code(formatted_result, language="json") - except (json.JSONDecodeError, TypeError): - # Use as plain text - output.print(str(result)) diff --git a/src/mcp_cli/ui/streaming_display.py b/src/mcp_cli/ui/streaming_display.py deleted file mode 100644 index 44a31298..00000000 --- a/src/mcp_cli/ui/streaming_display.py +++ /dev/null @@ -1,390 +0,0 @@ -# src/mcp_cli/ui/streaming_display.py -""" -from __future__ import annotations - -Compact streaming display components for MCP-CLI. - -Provides content-aware streaming display with dynamic phase messages, -content type detection, and smooth progressive rendering. -""" - -import time -from typing import Generator -from rich.console import Group -from rich.live import Live -from rich.panel import Panel -from rich.text import Text -from rich.markdown import Markdown - - -def tokenize_text(text: str) -> Generator[str, None, None]: - """Generate tokens to simulate LLM streaming.""" - # Simple word-based tokenization - words = [] - current_word = "" - - for char in text: - if char in " \n\t": - if current_word: - words.append(current_word) - current_word = "" - words.append(char) - else: - current_word += char - - if current_word: - words.append(current_word) - - # Yield words in small groups for smoother streaming - buffer = "" - for word in words: - buffer += word - if len(buffer) > 15 or word == "\n": - yield buffer - buffer = "" - - if buffer: - yield buffer - - -class CompactStreamingDisplay: - """Compact streaming display that shows progress for any content type.""" - - def __init__(self, title: str = "🤖 Assistant", mode: str = "response"): - self.title = title - self.mode = mode # response, tool, thinking, etc. 
- self.first_lines: list[str] = [] # Store the first few lines - self.current_line = "" - self.total_chars = 0 - self.total_lines = 0 - self.spinner_frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"] - self.spinner_index = 0 - self.preview_captured = False - self.max_preview_lines = 4 - self.detected_type: str | None = None - self.content = "" # Store full content - - def detect_content_type(self, text: str): - """Detect what type of content is being generated.""" - if self.detected_type: - return self.detected_type - - # Check for various content indicators - if "```" in text: - self.detected_type = "code" - elif self._is_markdown_table(text): - self.detected_type = "markdown_table" - elif "##" in text or "###" in text: - self.detected_type = "markdown" - elif ( - "def " in text - or "function " in text - or "class " in text - or "import " in text - ): - self.detected_type = "code" - elif any(x in text for x in ["CREATE TABLE", "SELECT", "INSERT", "UPDATE"]): - self.detected_type = "query" - elif any(x in text for x in ["", "
", "", " bool: - """Check if text contains a markdown table.""" - lines = text.split("\n") - pipe_lines = [line for line in lines if "|" in line] - - # Need at least 2 lines with pipes (header + separator) - if len(pipe_lines) < 2: - return False - - # Check for separator line with dashes - for line in pipe_lines: - if "|" in line and "-" in line: - # Count pipes and dashes - if line.count("|") >= 2 and line.count("-") >= 3: - return True - - return False - - def get_phase_message(self): - """Get appropriate phase message based on mode and progress.""" - if self.mode == "tool": - phases = [ - (0, "Preparing tool"), - (100, "Executing tool"), - (500, "Processing results"), - (1000, "Formatting output"), - (2000, "Completing execution"), - ] - elif self.mode == "thinking": - phases = [ - (0, "Thinking"), - (100, "Analyzing request"), - (300, "Formulating approach"), - (600, "Organizing thoughts"), - (1000, "Preparing response"), - ] - else: # response mode - if self.detected_type == "code": - phases = [ - (0, "Starting"), - (50, "Writing code"), - (500, "Adding implementation"), - (1000, "Adding documentation"), - (2000, "Finalizing code"), - ] - elif self.detected_type in ["table", "markdown_table"]: - phases = [ - (0, "Starting"), - (50, "Creating table"), - (200, "Adding rows"), - (500, "Formatting data"), - (1000, "Completing table"), - ] - elif self.detected_type == "markdown": - phases = [ - (0, "Starting"), - (50, "Writing content"), - (200, "Adding sections"), - (500, "Formatting text"), - (1000, "Completing document"), - ] - elif self.detected_type == "query": - phases = [ - (0, "Starting"), - (50, "Writing query"), - (200, "Adding conditions"), - (500, "Optimizing query"), - (1000, "Completing query"), - ] - else: # generic text - phases = [ - (0, "Starting"), - (50, "Generating response"), - (200, "Adding details"), - (500, "Elaborating"), - (1000, "Completing response"), - ] - - # Find the appropriate phase based on character count - for min_chars, 
message in reversed(phases): - if self.total_chars >= min_chars: - return message - - return phases[0][1] # Default to first phase - - def add_content(self, text: str): - """Process and store content.""" - self.total_chars += len(text) - self.content += text - - # Detect content type - if not self.detected_type and len(self.content) > 10: - self.detect_content_type(self.content) - - # Only capture the first few lines for preview - if not self.preview_captured: - for char in text: - if char == "\n": - if self.current_line.strip(): - self.first_lines.append(self.current_line) - self.total_lines += 1 - self.current_line = "" - - # Stop capturing after we have enough lines - if len(self.first_lines) >= self.max_preview_lines: - self.preview_captured = True - break - else: - self.current_line += char - if len(self.current_line) > 70: - self.first_lines.append(self.current_line[:70]) - self.current_line = self.current_line[70:] - - if len(self.first_lines) >= self.max_preview_lines: - self.preview_captured = True - break - else: - # Just count lines after preview is captured - self.total_lines += text.count("\n") - - def get_panel(self, elapsed: float) -> Panel: - """Get the compact display panel.""" - self.spinner_index = (self.spinner_index + 1) % len(self.spinner_frames) - spinner = self.spinner_frames[self.spinner_index] - - # Build display parts - display_parts = [] - - # Status line with spinner and dynamic phase - phase = self.get_phase_message() - status = f"{spinner} {phase}..." 
- display_parts.append(Text(status, style="yellow")) - - # Info line with statistics - if self.detected_type and self.detected_type != "text": - info = ( - f"⎿ {self.total_chars:,} chars • {elapsed:.1f}s • {self.detected_type}" - ) - else: - info = f"⎿ {self.total_chars:,} chars • {elapsed:.1f}s" - display_parts.append(Text(info, style="dim")) - display_parts.append(Text("")) # Empty line - - # Show the first few lines (preview) - if self.first_lines: - display_parts.append(Text(" Preview:", style="dim italic")) - for i, line in enumerate(self.first_lines[:3]): - line = line.strip() - if line: - # Style based on detected type - if self.detected_type == "code" and ( - line.startswith("def ") or line.startswith("class ") - ): - style = "green" - elif self.detected_type == "markdown" and line.startswith("#"): - style = "bold cyan" - elif self.detected_type == "table" and "|" in line: - style = "blue" - else: - style = "dim cyan" - - if len(line) > 55: - display_parts.append(Text(f" {line[:52]}...", style=style)) - else: - display_parts.append(Text(f" {line}", style=style)) - - # Add continuation indicator - if self.total_chars > 200: - display_parts.append( - Text(" ... 
generating content", style="dim italic") - ) - else: - # Just show a simple cursor while starting - display_parts.append(Text(" ▌", style="yellow blink")) - - return Panel( - Group(*display_parts), - title=self.title, - border_style="yellow", - height=10, # Fixed height for stability - expand=False, - ) - - def get_final_panel(self, elapsed: float) -> Panel: - """Get the final formatted panel with full content.""" - # Check if this is primarily a markdown table - has_markdown_table = self._is_markdown_table(self.content) - - # Determine how to render the content - should_render_markdown = False - - # For markdown tables with mixed content, we need special handling - if has_markdown_table: - # Check if it's JUST a table or has other markdown content - lines = self.content.split("\n") - non_table_lines = [ - line - for line in lines - if "|" not in line and line.strip() and not line.strip().startswith("-") - ] - - # If there's significant non-table content with markdown, render as markdown - has_other_markdown = any( - "##" in line or "```" in line or "**" in line - for line in non_table_lines - ) - - if has_other_markdown: - # Mixed content - try markdown but be ready to fall back - should_render_markdown = True - else: - # Mostly table - use plain text to preserve formatting - should_render_markdown = False - elif "```" in self.content: # Code blocks - should_render_markdown = True - elif "##" in self.content or "###" in self.content: # Headers - should_render_markdown = True - elif self.detected_type == "markdown": - should_render_markdown = True - - # Try to render as markdown if appropriate - content_display: Markdown | Text - if should_render_markdown: - try: - content_display = Markdown(self.content) - except Exception: - # Fallback to text if markdown rendering fails - content_display = Text(self.content, overflow="fold") - else: - # Use text with overflow handling for tables - content_display = Text(self.content, overflow="fold") - - # Create panel - return 
Panel( - content_display, - title=self.title, - subtitle=f"Response time: {elapsed:.2f}s", - subtitle_align="right", - border_style="green", - expand=True, # Keep normal expansion - ) - - -class StreamingContext: - """Context manager for streaming display.""" - - def __init__( - self, - console, - title: str = "🤖 Assistant", - mode: str = "response", - refresh_per_second: int = 8, - transient: bool = True, - ): - self.console = console - self.display = CompactStreamingDisplay(title=title, mode=mode) - self.refresh_per_second = refresh_per_second - self.transient = transient - self.live = None - self.start_time = time.time() - - def __enter__(self): - """Start the live display.""" - self.live = Live( - self.display.get_panel(0), - console=self.console, - refresh_per_second=self.refresh_per_second, - transient=self.transient, - ) - self.live.__enter__() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - """Clean up the live display and show final panel.""" - if self.live: - self.live.__exit__(exc_type, exc_val, exc_tb) - - # Show final panel if we have content - if self.display.content: - elapsed = time.time() - self.start_time - final_panel = self.display.get_final_panel(elapsed) - self.console.print(final_panel) - - def update(self, content: str): - """Update the streaming display with new content.""" - self.display.add_content(content) - elapsed = time.time() - self.start_time - if self.live: - self.live.update(self.display.get_panel(elapsed)) # type: ignore[unreachable] - - @property - def content(self): - """Get the accumulated content.""" - return self.display.content diff --git a/src/mcp_cli/utils/async_utils.py b/src/mcp_cli/utils/async_utils.py deleted file mode 100644 index dd68a137..00000000 --- a/src/mcp_cli/utils/async_utils.py +++ /dev/null @@ -1,40 +0,0 @@ -# src/mcp_cli/utils/async_utils.py -""" -Tiny helper for “run an async coroutine from possibly-sync code”. - -* If no event-loop exists → `asyncio.run`. 
-* If a loop exists but is **not** running → `loop.run_until_complete`. -* If called **inside** a running loop → we raise, so callers know to - switch to the `*_async` variant instead of silently returning junk. -""" - -from __future__ import annotations - -import asyncio -from typing import Awaitable, TypeVar - -T = TypeVar("T") - - -def run_blocking(coro: Awaitable[T]) -> T: - try: - loop = asyncio.get_running_loop() - except RuntimeError: # totally sync context - # asyncio.run expects a coroutine, not just any awaitable - if asyncio.iscoroutine(coro): - coro_result: T = asyncio.run(coro) - return coro_result - else: - # If it's not a coroutine, wrap it - async def _wrapper() -> T: - return await coro - - wrapped_result: T = asyncio.run(_wrapper()) - return wrapped_result - - if loop.is_running(): - raise RuntimeError( - "run_blocking() called inside a running event-loop - " - "use the async API instead." - ) - return loop.run_until_complete(coro) diff --git a/src/mcp_cli/utils/llm_probe.py b/src/mcp_cli/utils/llm_probe.py deleted file mode 100644 index 900bc4b1..00000000 --- a/src/mcp_cli/utils/llm_probe.py +++ /dev/null @@ -1,241 +0,0 @@ -# src/mcp_cli/utils/llm_probe.py -""" -LLM Provider/Model availability testing utility. - -This module provides utilities for testing whether a provider/model combination -is available and working before committing to configuration changes. 
-""" - -from __future__ import annotations - -import logging -import re -from typing import Any -from dataclasses import dataclass - -from mcp_cli.model_management import ModelManager # ← CHANGED - - -@dataclass -class ProbeResult: - """Result of a provider/model availability probe.""" - - success: bool - error_message: str | None = None - client: Any | None = None - response: dict[str, Any] | None = None - - -class LLMProbe: - """Utility class for testing LLM provider/model availability.""" - - def __init__( - self, model_manager: ModelManager, suppress_logging: bool = True - ): # ← CHANGED - """ - Initialize the probe utility. - - Args: - model_manager: Model manager instance - suppress_logging: Whether to suppress chuk_llm logging during probes - """ - self.model_manager = model_manager # ← CHANGED - self.suppress_logging = suppress_logging - self._original_log_level: int | None = None - - def __enter__(self): - """Context manager entry - suppress logging if requested.""" - if self.suppress_logging: - chuk_logger = logging.getLogger("chuk_llm") - self._original_log_level = chuk_logger.level - chuk_logger.setLevel(logging.CRITICAL) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - """Context manager exit - restore logging level.""" - if self.suppress_logging and self._original_log_level is not None: - chuk_logger = logging.getLogger("chuk_llm") - chuk_logger.setLevel(self._original_log_level) - - async def __aenter__(self): - """Async context manager entry - suppress logging if requested.""" - if self.suppress_logging: - chuk_logger = logging.getLogger("chuk_llm") - self._original_log_level = chuk_logger.level - chuk_logger.setLevel(logging.CRITICAL) - return self - - async def __aexit__(self, exc_type, exc_val, exc_tb): - """Async context manager exit - restore logging level.""" - if self.suppress_logging and self._original_log_level is not None: - chuk_logger = logging.getLogger("chuk_llm") - chuk_logger.setLevel(self._original_log_level) - - 
async def test_provider_model( - self, provider: str, model: str, test_message: str = "ping" - ) -> ProbeResult: - """ - Test if a provider/model combination is available and working. - - Args: - provider: Provider name (e.g., 'openai', 'anthropic') - model: Model name (e.g., 'gpt-4', 'claude-3-sonnet') - test_message: Message to send for testing (default: "ping") - - Returns: - ProbeResult with success status, error message, and client if successful - """ - try: - # Create client using ModelManager's client creation method - client = self.model_manager.get_client(provider, model) - - # Test with a simple completion - response = await client.create_completion( - [{"role": "user", "content": test_message}] - ) - - # Validate the response - if self._is_valid_response(response): - return ProbeResult(success=True, client=client, response=response) - else: - error_msg = self._extract_error_message(response) - return ProbeResult( - success=False, error_message=error_msg, response=response - ) - - except Exception as exc: - return ProbeResult(success=False, error_message=str(exc)) - - async def test_model(self, model: str, test_message: str = "ping") -> ProbeResult: - """ - Test if a model is available with the current active provider. - - Args: - model: Model name to test - test_message: Message to send for testing - - Returns: - ProbeResult with success status and details - """ - provider = self.model_manager.get_active_provider() # ← CHANGED - return await self.test_provider_model(provider, model, test_message) - - async def test_provider( - self, provider: str, test_message: str = "ping" - ) -> ProbeResult: - """ - Test if a provider is available with its default model. 
- - Args: - provider: Provider name to test - test_message: Message to send for testing - - Returns: - ProbeResult with success status and details - """ - try: - # Validate provider exists in configuration - if not self.model_manager.validate_provider(provider): - raise ValueError(f"Provider {provider} not found") - model = self.model_manager.get_default_model(provider) - return await self.test_provider_model(provider, model, test_message) - except ValueError as e: - return ProbeResult(success=False, error_message=str(e)) - - def _is_valid_response(self, response: Any) -> bool: - """ - Check if a response indicates successful communication. - - Args: - response: Response from create_completion - - Returns: - True if response indicates success, False otherwise - """ - return ( - isinstance(response, dict) - and not response.get("error", False) - and isinstance(response.get("response"), str) - and response["response"].strip() - and not response["response"].strip().lower().startswith("error") - ) - - def _extract_error_message(self, response: Any) -> str: - """ - Extract a clean, user-friendly error message from a failed response. 
- - Args: - response: Failed response from create_completion - - Returns: - Clean error message string - """ - if not isinstance(response, dict): - return "Invalid response format" - - response_text = response.get("response", "") - if not response_text: - return "Provider returned empty response" - - # Try to extract meaningful error from structured error responses - if "Error code:" in response_text and "message" in response_text: - try: - # Extract error message from JSON-like structure - match = re.search(r"'message': '([^']+)'", response_text) - if match: - return match.group(1) - - # Fallback: extract error code - code_match = re.search(r"Error code: (\d+)", response_text) - if code_match: - return f"HTTP {code_match.group(1)} error - check model availability or authentication" - - except Exception: - pass - - # Fallback: return the response text (might be verbose but informative) - result: str = response_text - return result - - -# Convenience functions for common use cases -async def test_model_availability( - model: str, - model_manager: ModelManager, # ← CHANGED - suppress_logging: bool = True, -) -> ProbeResult: - """ - Quick function to test if a model is available with current provider. - - Args: - model: Model name to test - model_manager: Model manager instance - suppress_logging: Whether to suppress internal logging - - Returns: - ProbeResult indicating success/failure - """ - async with LLMProbe(model_manager, suppress_logging) as probe: - result: ProbeResult = await probe.test_model(model) - return result - - -async def test_provider_availability( - provider: str, - model_manager: ModelManager, # ← CHANGED - suppress_logging: bool = True, -) -> ProbeResult: - """ - Quick function to test if a provider is available with its default model. 
- - Args: - provider: Provider name to test - model_manager: Model manager instance - suppress_logging: Whether to suppress internal logging - - Returns: - ProbeResult indicating success/failure - """ - async with LLMProbe(model_manager, suppress_logging) as probe: - result: ProbeResult = await probe.test_provider(provider) - return result diff --git a/src/mcp_cli/utils/preferences.py b/src/mcp_cli/utils/preferences.py index acd82e2e..93f63076 100644 --- a/src/mcp_cli/utils/preferences.py +++ b/src/mcp_cli/utils/preferences.py @@ -43,37 +43,45 @@ class ToolRiskLevel(str, Enum): HIGH = "high" # System-wide or destructive operations +class ToolPatternRule(BaseModel): + """Pattern-based rule for tool confirmations - no dict goop!""" + + pattern: str = Field(description="Glob pattern for tool names") + action: str = Field(description="Action: always/never or risk level") + + model_config = {"frozen": True} + + class ToolConfirmationPreferences(BaseModel): """Tool confirmation preferences.""" - mode: str = "smart" # Global confirmation mode + mode: ConfirmationMode = ConfirmationMode.SMART per_tool: dict[str, str] = Field( default_factory=dict, description="Per-tool overrides (always/never/ask)" ) - patterns: list[dict[str, str]] = Field( + patterns: list[ToolPatternRule] = Field( default_factory=list, description="Pattern-based rules" ) - risk_thresholds: dict[str, bool] = Field( + risk_thresholds: dict[ToolRiskLevel, bool] = Field( default_factory=lambda: { - "safe": False, # Don't confirm safe tools - "moderate": True, # Confirm moderate risk tools - "high": True, # Always confirm high risk tools + ToolRiskLevel.SAFE: False, + ToolRiskLevel.MODERATE: True, + ToolRiskLevel.HIGH: True, } ) - categories: dict[str, str] = Field( + categories: dict[str, ToolRiskLevel] = Field( default_factory=lambda: { - # Default risk categories for common tool patterns - "read_*": "safe", - "list_*": "safe", - "get_*": "safe", - "describe_*": "safe", - "write_*": "moderate", - 
"create_*": "moderate", - "update_*": "moderate", - "delete_*": "high", - "remove_*": "high", - "execute_*": "high", - "run_*": "high", + "read_*": ToolRiskLevel.SAFE, + "list_*": ToolRiskLevel.SAFE, + "get_*": ToolRiskLevel.SAFE, + "describe_*": ToolRiskLevel.SAFE, + "write_*": ToolRiskLevel.MODERATE, + "create_*": ToolRiskLevel.MODERATE, + "update_*": ToolRiskLevel.MODERATE, + "delete_*": ToolRiskLevel.HIGH, + "remove_*": ToolRiskLevel.HIGH, + "execute_*": ToolRiskLevel.HIGH, + "run_*": ToolRiskLevel.HIGH, } ) @@ -81,7 +89,7 @@ class ToolConfirmationPreferences(BaseModel): class UIPreferences(BaseModel): """UI-related preferences.""" - theme: str = "default" + theme: Theme = Theme.DEFAULT verbose: bool = True confirm_tools: bool = True show_reasoning: bool = True @@ -192,8 +200,8 @@ def load_preferences(self) -> MCPPreferences: with open(self.preferences_file, "r") as f: data = json.load(f) return MCPPreferences.from_dict(data) - except (json.JSONDecodeError, KeyError): - # If preferences are corrupted, backup and create new + except (json.JSONDecodeError, KeyError, ValueError): + # If preferences are corrupted or invalid, backup and create new backup_file = self.preferences_file.with_suffix(".json.backup") self.preferences_file.rename(backup_file) return MCPPreferences() @@ -217,14 +225,15 @@ def set_theme(self, theme: str) -> None: Raises: ValueError: If theme is not valid """ - # Validate theme - valid_themes = [t.value for t in Theme] - if theme not in valid_themes: + try: + theme_enum = Theme(theme) + except ValueError: + valid_themes = [t.value for t in Theme] raise ValueError( f"Invalid theme: {theme}. 
Valid themes are: {', '.join(valid_themes)}" ) - self.preferences.ui.theme = theme + self.preferences.ui.theme = theme_enum self.save_preferences() def get_verbose(self) -> bool: @@ -238,16 +247,17 @@ def set_verbose(self, verbose: bool) -> None: def get_confirm_tools(self) -> bool: """Get tool confirmation setting (legacy compatibility).""" - return self.preferences.ui.tool_confirmation.mode != "never" + return self.preferences.ui.tool_confirmation.mode != ConfirmationMode.NEVER def set_confirm_tools(self, confirm: bool) -> None: """Set tool confirmation mode (legacy compatibility).""" self.preferences.ui.confirm_tools = confirm - # Also update new confirmation mode - self.preferences.ui.tool_confirmation.mode = "smart" if confirm else "never" + self.preferences.ui.tool_confirmation.mode = ( + ConfirmationMode.SMART if confirm else ConfirmationMode.NEVER + ) self.save_preferences() - def get_tool_confirmation_mode(self) -> str: + def get_tool_confirmation_mode(self) -> ConfirmationMode: """Get the global tool confirmation mode.""" return self.preferences.ui.tool_confirmation.mode @@ -257,11 +267,12 @@ def set_tool_confirmation_mode(self, mode: str) -> None: Args: mode: One of 'always', 'never', or 'smart' """ - if mode not in [m.value for m in ConfirmationMode]: + try: + confirmation_mode = ConfirmationMode(mode) + except ValueError: raise ValueError(f"Invalid confirmation mode: {mode}") - self.preferences.ui.tool_confirmation.mode = mode - # Update legacy flag - self.preferences.ui.confirm_tools = mode != "never" + self.preferences.ui.tool_confirmation.mode = confirmation_mode + self.preferences.ui.confirm_tools = confirmation_mode != ConfirmationMode.NEVER self.save_preferences() def get_tool_confirmation(self, tool_name: str) -> str | None: @@ -299,16 +310,15 @@ def clear_tool_confirmations(self) -> None: self.preferences.ui.tool_confirmation.per_tool.clear() self.save_preferences() - def get_tool_risk_level(self, tool_name: str) -> str: + def 
get_tool_risk_level(self, tool_name: str) -> ToolRiskLevel: """Determine the risk level of a tool based on patterns. Args: tool_name: Name of the tool Returns: - Risk level: 'safe', 'moderate', or 'high' + Risk level enum value. """ - # Check if tool matches any category pattern for pattern, risk in self.preferences.ui.tool_confirmation.categories.items(): if pattern.endswith("*"): prefix = pattern[:-1] @@ -319,8 +329,7 @@ def get_tool_risk_level(self, tool_name: str) -> str: if tool_name.endswith(suffix): return risk - # Default to moderate risk - return "moderate" + return ToolRiskLevel.MODERATE def should_confirm_tool(self, tool_name: str) -> bool: """Determine if a tool should be confirmed based on preferences. @@ -337,25 +346,21 @@ def should_confirm_tool(self, tool_name: str) -> bool: return True elif tool_setting == "never": return False - elif tool_setting == "ask": - # Use global mode - pass + # "ask" falls through to global mode # Check global mode - mode = self.get_tool_confirmation_mode() - if mode == "always": + mode = self.preferences.ui.tool_confirmation.mode + if mode == ConfirmationMode.ALWAYS: return True - elif mode == "never": + elif mode == ConfirmationMode.NEVER: return False - elif mode == "smart": - # Use risk-based decision + elif mode == ConfirmationMode.SMART: risk_level = self.get_tool_risk_level(tool_name) return self.preferences.ui.tool_confirmation.risk_thresholds.get( risk_level, True ) - # Default to confirming - return True + return True # type: ignore[unreachable] # safety fallback def add_tool_pattern(self, pattern: str, action: str) -> None: """Add a pattern-based rule for tool confirmations. 
@@ -364,9 +369,8 @@ def add_tool_pattern(self, pattern: str, action: str) -> None: pattern: Glob pattern for tool names action: 'always', 'never', or risk level """ - self.preferences.ui.tool_confirmation.patterns.append( - {"pattern": pattern, "action": action} - ) + rule = ToolPatternRule(pattern=pattern, action=action) + self.preferences.ui.tool_confirmation.patterns.append(rule) self.save_preferences() def remove_tool_pattern(self, pattern: str) -> bool: @@ -381,7 +385,7 @@ def remove_tool_pattern(self, pattern: str) -> bool: patterns = self.preferences.ui.tool_confirmation.patterns original_len = len(patterns) self.preferences.ui.tool_confirmation.patterns = [ - p for p in patterns if p.get("pattern") != pattern + p for p in patterns if p.pattern != pattern ] if len(self.preferences.ui.tool_confirmation.patterns) < original_len: self.save_preferences() @@ -395,11 +399,11 @@ def set_risk_threshold(self, risk_level: str, should_confirm: bool) -> None: risk_level: 'safe', 'moderate', or 'high' should_confirm: Whether to confirm tools at this risk level """ - if risk_level not in ["safe", "moderate", "high"]: + try: + level = ToolRiskLevel(risk_level) + except ValueError: raise ValueError(f"Invalid risk level: {risk_level}") - self.preferences.ui.tool_confirmation.risk_thresholds[risk_level] = ( - should_confirm - ) + self.preferences.ui.tool_confirmation.risk_thresholds[level] = should_confirm self.save_preferences() def get_active_provider(self) -> str | None: @@ -645,11 +649,9 @@ def get_custom_provider_api_key(self, name: str) -> str | None: if not provider_data: return None - # Get the environment variable name - env_var = provider_data.get("env_var_name") - if not env_var: - # Use default pattern - env_var = f"{name.upper().replace('-', '_')}_API_KEY" + # Parse provider data into model - no dict goop! 
+ provider = CustomProvider.from_dict(provider_data) + env_var = provider.get_env_var_name() return os.environ.get(env_var) @@ -666,6 +668,7 @@ def get_custom_provider_api_key(self, name: str) -> str | None: "ProviderPreferences", "ServerPreferences", "ToolConfirmationPreferences", + "ToolPatternRule", "CustomProvider", "Theme", "ConfirmationMode", diff --git a/tests/adapters/test_interactive_adapter.py b/tests/adapters/test_interactive_adapter.py index 87232ced..5706db6f 100644 --- a/tests/adapters/test_interactive_adapter.py +++ b/tests/adapters/test_interactive_adapter.py @@ -284,7 +284,7 @@ def test_parse_arguments(self): assert kwargs == {"option": "value"} kwargs = self.adapter._parse_arguments(cmd, ["positional"]) - assert kwargs == {"args": "positional"} + assert kwargs == {"args": ["positional"]} kwargs = self.adapter._parse_arguments(cmd, ["pos1", "pos2"]) assert kwargs == {"args": ["pos1", "pos2"]} diff --git a/tests/auth/test_token_actions.py b/tests/auth/test_token_actions.py deleted file mode 100644 index 960bbd14..00000000 --- a/tests/auth/test_token_actions.py +++ /dev/null @@ -1,531 +0,0 @@ -"""Tests for token command actions.""" - -import json -from unittest.mock import patch - -import pytest - -from mcp_cli.auth import OAuthTokens -from mcp_cli.auth import TokenType -from mcp_cli.commands.actions.token import ( - token_backends_action_async, - token_clear_action_async, - token_delete_action_async, - token_get_action_async, - token_list_action_async, - token_set_action_async, -) -from mcp_cli.commands.models import ( - TokenListParams, - TokenSetParams, - TokenDeleteParams, - TokenClearParams, -) - - -@pytest.fixture -def mock_token_manager(tmp_path): - """Mock TokenManager with temporary storage.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock: - from mcp_cli.auth import TokenManager - from mcp_cli.auth import TokenStoreBackend - - manager = TokenManager( - token_dir=tmp_path / "tokens", - 
backend=TokenStoreBackend.ENCRYPTED_FILE, - password="test-password", - service_name="mcp-cli", - ) - mock.return_value = manager - yield manager - - -@pytest.fixture -def sample_oauth_tokens(): - """Sample OAuth tokens.""" - return OAuthTokens( - access_token="test-access-token", - refresh_token="test-refresh-token", - expires_in=3600, - token_type="Bearer", - ) - - -class TestTokenListAction: - """Test token list action.""" - - @pytest.mark.asyncio - async def test_list_empty(self, mock_token_manager): - """Test listing when no tokens exist.""" - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_list_action_async(TokenListParams()) - # Should show info/warning messages - # The function shows either warning about no tokens or info about token management - assert mock_output.info.called or mock_output.warning.called - - @pytest.mark.asyncio - async def test_list_with_oauth_tokens( - self, mock_token_manager, sample_oauth_tokens - ): - """Test listing OAuth tokens.""" - # Store token - mock_token_manager.save_tokens("test-server", sample_oauth_tokens) - - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_list_action_async(TokenListParams()) - # Should print table - mock_output.print_table.assert_called() - - @pytest.mark.asyncio - async def test_list_with_bearer_tokens(self, mock_token_manager): - """Test listing bearer tokens.""" - # Store bearer token - mock_token_manager.token_store.store_generic("my-api", "token123", "bearer") - mock_token_manager.registry.register("my-api", TokenType.BEARER, "bearer") - - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_list_action_async(TokenListParams()) - mock_output.print_table.assert_called() - - @pytest.mark.asyncio - async def test_list_filter_by_namespace(self, mock_token_manager): - """Test filtering tokens by namespace.""" - # Store tokens in different namespaces - mock_token_manager.token_store.store_generic("token1", 
"value1", "ns1") - mock_token_manager.registry.register("token1", TokenType.BEARER, "ns1") - - mock_token_manager.token_store.store_generic("token2", "value2", "ns2") - mock_token_manager.registry.register("token2", TokenType.BEARER, "ns2") - - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_list_action_async(TokenListParams(namespace="ns1")) - mock_output.print_table.assert_called() - - @pytest.mark.asyncio - async def test_list_filter_by_type(self, mock_token_manager, sample_oauth_tokens): - """Test filtering tokens by type.""" - # Store different token types - mock_token_manager.save_tokens("server1", sample_oauth_tokens) - mock_token_manager.token_store.store_generic("bearer1", "value1", "bearer") - mock_token_manager.registry.register("bearer1", TokenType.BEARER, "bearer") - - # List only bearer tokens - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_list_action_async( - TokenListParams(show_oauth=False, show_bearer=True) - ) - mock_output.print_table.assert_called() - - @pytest.mark.asyncio - async def test_list_shows_expiration(self, mock_token_manager): - """Test that list shows expiration dates.""" - import time - - # Store token with expiration - expires_at = time.time() + 3600 - mock_token_manager.token_store.store_generic("token1", "value1", "bearer") - mock_token_manager.registry.register( - "token1", TokenType.BEARER, "bearer", metadata={"expires_at": expires_at} - ) - - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_list_action_async(TokenListParams()) - # Should format table with expiration column - mock_output.print_table.assert_called() - - @pytest.mark.asyncio - async def test_list_error_handling(self): - """Test error handling in list action.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - side_effect=Exception("Test error"), - ): - with pytest.raises(Exception): - await token_list_action_async(TokenListParams()) - - 
-class TestTokenSetAction: - """Test token set action.""" - - @pytest.mark.asyncio - async def test_set_bearer_token(self, mock_token_manager): - """Test storing bearer token.""" - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_set_action_async( - TokenSetParams( - name="my-token", - token_type="bearer", - value="token123", - namespace="bearer", - ) - ) - - # Should show success - mock_output.success.assert_called() - - # Verify stored - retrieved = mock_token_manager.token_store.retrieve_generic( - "my-token", "bearer" - ) - assert retrieved is not None - - @pytest.mark.asyncio - async def test_set_api_key(self, mock_token_manager): - """Test storing API key.""" - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_set_action_async( - TokenSetParams( - name="openai", - token_type="api-key", - value="sk-123", - provider="openai", - namespace="api-key", - ) - ) - - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_set_api_key_without_provider(self, mock_token_manager): - """Test that API key requires provider.""" - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_set_action_async( - TokenSetParams(name="openai", token_type="api-key", value="sk-123") - ) - - # Should show error - mock_output.error.assert_called() - - @pytest.mark.asyncio - async def test_set_generic_token(self, mock_token_manager): - """Test storing generic token.""" - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_set_action_async( - TokenSetParams( - name="my-token", - token_type="generic", - value="value123", - namespace="custom", - ) - ) - - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_set_prompts_for_value(self, mock_token_manager): - """Test that set prompts for value if not provided.""" - with patch("mcp_cli.commands.actions.token.output") as mock_output: - with patch("getpass.getpass") as mock_getpass: 
- mock_getpass.return_value = "prompted-value" - - await token_set_action_async( - TokenSetParams(name="my-token", token_type="bearer") - ) - - # Should call getpass - mock_getpass.assert_called() - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_set_empty_value(self, mock_token_manager): - """Test that empty value shows error.""" - with patch("mcp_cli.commands.actions.token.output") as mock_output: - with patch("getpass.getpass") as mock_getpass: - mock_getpass.return_value = "" - - await token_set_action_async( - TokenSetParams(name="my-token", token_type="bearer") - ) - - mock_output.error.assert_called() - - @pytest.mark.asyncio - async def test_set_unknown_type(self, mock_token_manager): - """Test error handling for unknown token type.""" - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_set_action_async( - TokenSetParams(name="my-token", token_type="unknown", value="value123") - ) - - mock_output.error.assert_called() - mock_output.hint.assert_called() - - -class TestTokenGetAction: - """Test token get action.""" - - @pytest.mark.asyncio - async def test_get_existing_token(self, mock_token_manager): - """Test getting token information.""" - # Store token - from mcp_cli.auth import BearerToken - - bearer = BearerToken(token="token123") - stored = bearer.to_stored_token("my-token") - stored.metadata = {"namespace": "bearer"} - mock_token_manager.token_store._store_raw( - "bearer:my-token", json.dumps(stored.model_dump()) - ) - - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_get_action_async(name="my-token", namespace="bearer") - - # Should show token info - mock_output.rule.assert_called() - mock_output.info.assert_called() - - @pytest.mark.asyncio - async def test_get_nonexistent_token(self, mock_token_manager): - """Test getting token that doesn't exist.""" - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await 
token_get_action_async(name="nonexistent", namespace="bearer") - - mock_output.warning.assert_called() - - @pytest.mark.asyncio - async def test_get_with_different_namespace(self, mock_token_manager): - """Test getting token from specific namespace.""" - from mcp_cli.auth import APIKeyToken - - api_key = APIKeyToken(provider="openai", key="sk-123") - stored = api_key.to_stored_token("openai") - stored.metadata = {"namespace": "api-key"} - mock_token_manager.token_store._store_raw( - "api-key:openai", json.dumps(stored.model_dump()) - ) - - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_get_action_async(name="openai", namespace="api-key") - - mock_output.info.assert_called() - - -class TestTokenDeleteAction: - """Test token delete action.""" - - @pytest.mark.asyncio - async def test_delete_oauth_token(self, mock_token_manager, sample_oauth_tokens): - """Test deleting OAuth token.""" - # Store OAuth token - mock_token_manager.save_tokens("test-server", sample_oauth_tokens) - - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_delete_action_async( - TokenDeleteParams(name="test-server", oauth=True) - ) - - mock_output.success.assert_called() - - # Verify deleted - assert not mock_token_manager.has_valid_tokens("test-server") - - @pytest.mark.asyncio - async def test_delete_generic_token(self, mock_token_manager): - """Test deleting generic token.""" - # Store token - mock_token_manager.token_store.store_generic("my-token", "value123", "bearer") - mock_token_manager.registry.register("my-token", TokenType.BEARER, "bearer") - - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_delete_action_async( - TokenDeleteParams(name="my-token", namespace="bearer") - ) - - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_delete_with_namespace_search(self, mock_token_manager): - """Test deleting token without specifying namespace.""" - # Store token in 
bearer namespace - mock_token_manager.token_store.store_generic("my-token", "value123", "bearer") - mock_token_manager.registry.register("my-token", TokenType.BEARER, "bearer") - - with patch("mcp_cli.commands.actions.token.output") as mock_output: - # Delete without namespace - should search common namespaces - await token_delete_action_async(TokenDeleteParams(name="my-token")) - - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_delete_nonexistent_token(self, mock_token_manager): - """Test deleting token that doesn't exist.""" - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_delete_action_async(TokenDeleteParams(name="nonexistent")) - - mock_output.warning.assert_called() - - -class TestTokenClearAction: - """Test token clear action.""" - - @pytest.mark.asyncio - async def test_clear_namespace_with_confirmation(self, mock_token_manager): - """Test clearing tokens in a namespace.""" - # Store tokens - mock_token_manager.token_store.store_generic("token1", "value1", "ns1") - mock_token_manager.registry.register("token1", TokenType.BEARER, "ns1") - mock_token_manager.token_store.store_generic("token2", "value2", "ns1") - mock_token_manager.registry.register("token2", TokenType.BEARER, "ns1") - - with patch("mcp_cli.commands.actions.token.output") as mock_output: - with patch("chuk_term.ui.prompts.confirm", return_value=True): - await token_clear_action_async(TokenClearParams(namespace="ns1")) - - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_clear_all_with_confirmation(self, mock_token_manager): - """Test clearing all tokens.""" - # Store tokens - mock_token_manager.token_store.store_generic("token1", "value1", "ns1") - mock_token_manager.registry.register("token1", TokenType.BEARER, "ns1") - mock_token_manager.token_store.store_generic("token2", "value2", "ns2") - mock_token_manager.registry.register("token2", TokenType.BEARER, "ns2") - - with 
patch("mcp_cli.commands.actions.token.output") as mock_output: - with patch("chuk_term.ui.prompts.confirm", return_value=True): - await token_clear_action_async(TokenClearParams()) - - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_clear_cancelled(self, mock_token_manager): - """Test canceling clear operation.""" - # Store token - mock_token_manager.token_store.store_generic("token1", "value1", "ns1") - mock_token_manager.registry.register("token1", TokenType.BEARER, "ns1") - - with patch("mcp_cli.commands.actions.token.output") as mock_output: - with patch("chuk_term.ui.prompts.confirm", return_value=False): - await token_clear_action_async(TokenClearParams(namespace="ns1")) - - mock_output.warning.assert_called_with("Cancelled") - - @pytest.mark.asyncio - async def test_clear_with_force(self, mock_token_manager): - """Test clearing with force flag.""" - # Store token - mock_token_manager.token_store.store_generic("token1", "value1", "ns1") - mock_token_manager.registry.register("token1", TokenType.BEARER, "ns1") - - with patch("mcp_cli.commands.actions.token.output") as mock_output: - # Force should not prompt - await token_clear_action_async( - TokenClearParams(namespace="ns1", force=True) - ) - - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_clear_empty_namespace(self, mock_token_manager): - """Test clearing when no tokens exist.""" - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_clear_action_async( - TokenClearParams(namespace="empty", force=True) - ) - - mock_output.warning.assert_called() - - -class TestTokenBackendsAction: - """Test token backends action.""" - - @pytest.mark.asyncio - async def test_list_backends(self): - """Test listing available backends.""" - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_backends_action_async() - - # Should print table of backends - mock_output.print_table.assert_called() - 
mock_output.info.assert_called() - - @pytest.mark.asyncio - async def test_backends_shows_detected(self): - """Test that detected backend is indicated.""" - - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_backends_action_async() - - # Should show which backend is auto-detected - mock_output.info.assert_called() - - -class TestErrorHandling: - """Test error handling in token actions.""" - - @pytest.mark.asyncio - async def test_list_error(self): - """Test list action error handling.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - side_effect=Exception("Test error"), - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - with pytest.raises(Exception): - await token_list_action_async(TokenListParams()) - mock_output.error.assert_called() - - @pytest.mark.asyncio - async def test_set_error(self): - """Test set action error handling.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - side_effect=Exception("Test error"), - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - with pytest.raises(Exception): - await token_set_action_async( - TokenSetParams( - name="test", token_type="bearer", value="value123" - ) - ) - mock_output.error.assert_called() - - @pytest.mark.asyncio - async def test_get_error(self): - """Test get action error handling.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - side_effect=Exception("Test error"), - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - with pytest.raises(Exception): - await token_get_action_async(name="test") - mock_output.error.assert_called() - - @pytest.mark.asyncio - async def test_delete_error(self): - """Test delete action error handling.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - side_effect=Exception("Test error"), - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - with 
pytest.raises(Exception): - await token_delete_action_async(TokenDeleteParams(name="test")) - mock_output.error.assert_called() - - @pytest.mark.asyncio - async def test_clear_error(self): - """Test clear action error handling.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - side_effect=Exception("Test error"), - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - with pytest.raises(Exception): - await token_clear_action_async(TokenClearParams(force=True)) - mock_output.error.assert_called() - - @pytest.mark.asyncio - async def test_backends_error(self): - """Test backends action error handling.""" - with patch( - "mcp_cli.commands.actions.token.TokenStoreFactory.get_available_backends", - side_effect=Exception("Test error"), - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - with pytest.raises(Exception): - await token_backends_action_async() - mock_output.error.assert_called() diff --git a/tests/auth/test_token_actions_coverage.py b/tests/auth/test_token_actions_coverage.py deleted file mode 100644 index 2155b06b..00000000 --- a/tests/auth/test_token_actions_coverage.py +++ /dev/null @@ -1,218 +0,0 @@ -"""Additional tests for token actions to improve coverage.""" - -import pytest -from unittest.mock import patch, MagicMock -import time - -from mcp_cli.commands.actions.token import ( - token_list_action_async, - token_set_action_async, - _get_token_manager, -) -from mcp_cli.commands.models import ( - TokenListParams, - TokenSetParams, -) -from mcp_cli.auth import TokenType - - -class TestGetTokenManager: - """Test _get_token_manager helper function.""" - - def test_get_token_manager_with_config(self): - """Test getting token manager with valid config.""" - mock_config = MagicMock() - mock_config.token_store_backend = "encrypted_file" - - with patch( - "mcp_cli.commands.actions.token.get_config", return_value=mock_config - ): - with patch("mcp_cli.commands.actions.token.TokenManager") as 
mock_tm: - _get_token_manager() - mock_tm.assert_called_once() - - def test_get_token_manager_with_config_error(self): - """Test getting token manager when config raises exception.""" - with patch( - "mcp_cli.commands.actions.token.get_config", - side_effect=Exception("Config error"), - ): - with patch("mcp_cli.commands.actions.token.TokenManager") as mock_tm: - _get_token_manager() - # Should fall back to AUTO backend - mock_tm.assert_called_once() - - -class TestTokenListActionCoverage: - """Additional coverage tests for token_list_action_async.""" - - @pytest.fixture - def mock_token_manager(self, tmp_path): - """Mock TokenManager with temporary storage.""" - from mcp_cli.auth import TokenManager, TokenStoreBackend - - manager = TokenManager( - token_dir=tmp_path / "tokens", - backend=TokenStoreBackend.ENCRYPTED_FILE, - password="test-password", - service_name="mcp-cli", - ) - return manager - - @pytest.mark.asyncio - async def test_list_with_provider_tokens(self, mock_token_manager): - """Test listing with show_providers=True and provider tokens.""" - # Mock provider tokens - mock_provider_tokens = { - "openai": { - "env_var": "OPENAI_API_KEY", - "in_env": False, - }, - "anthropic": { - "env_var": "ANTHROPIC_API_KEY", - "in_env": True, - }, - } - - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch( - "mcp_cli.auth.provider_tokens.list_all_provider_tokens", - return_value=mock_provider_tokens, - ): - with patch("mcp_cli.commands.actions.token.output"): - params = TokenListParams(show_providers=True) - await token_list_action_async(params) - - @pytest.mark.asyncio - async def test_list_with_show_oauth(self, mock_token_manager): - """Test listing with show_oauth=True.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - params = TokenListParams(show_oauth=True) - 
await token_list_action_async(params) - # Should show OAuth info message - assert mock_output.info.called - - @pytest.mark.asyncio - async def test_list_skip_provider_namespace(self, mock_token_manager): - """Test that provider namespace is skipped when show_providers is True.""" - # Register a token in provider namespace - mock_token_manager.registry.register( - "test-token", TokenType.API_KEY, "provider" - ) - - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch( - "mcp_cli.auth.provider_tokens.list_all_provider_tokens", return_value={} - ): - with patch("mcp_cli.commands.actions.token.output"): - params = TokenListParams(show_providers=True) - await token_list_action_async(params) - - @pytest.mark.asyncio - async def test_list_filter_bearer_tokens(self, mock_token_manager): - """Test filtering bearer tokens when show_bearer=False.""" - # Register bearer token - mock_token_manager.registry.register("bearer-token", TokenType.BEARER, "test") - - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch("mcp_cli.commands.actions.token.output"): - params = TokenListParams(show_bearer=False) - await token_list_action_async(params) - - @pytest.mark.asyncio - async def test_list_filter_api_key_tokens(self, mock_token_manager): - """Test filtering API key tokens when show_api_keys=False.""" - # Register API key token - mock_token_manager.registry.register("api-key-token", TokenType.API_KEY, "test") - - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch("mcp_cli.commands.actions.token.output"): - params = TokenListParams(show_api_keys=False) - await token_list_action_async(params) - - @pytest.mark.asyncio - async def test_list_with_expired_token(self, mock_token_manager): - """Test listing token with expired timestamp.""" - # Register token with expired metadata - 
mock_token_manager.registry.register( - "expired-token", - TokenType.BEARER, - "test", - metadata={"expires_at": time.time() - 3600}, # 1 hour ago - ) - - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch("mcp_cli.commands.actions.token.output"): - params = TokenListParams() - await token_list_action_async(params) - - @pytest.mark.asyncio - async def test_list_with_provider_metadata(self, mock_token_manager): - """Test listing token with provider in metadata.""" - # Register token with provider metadata - mock_token_manager.registry.register( - "provider-token", - TokenType.API_KEY, - "test", - metadata={"provider": "custom-provider"}, - ) - - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch("mcp_cli.commands.actions.token.output"): - params = TokenListParams() - await token_list_action_async(params) - - -class TestTokenSetActionCoverage: - """Additional coverage tests for token_set_action_async.""" - - @pytest.fixture - def mock_token_manager(self, tmp_path): - """Mock TokenManager.""" - from mcp_cli.auth import TokenManager, TokenStoreBackend - - manager = TokenManager( - token_dir=tmp_path / "tokens", - backend=TokenStoreBackend.ENCRYPTED_FILE, - password="test-password", - service_name="mcp-cli", - ) - return manager - - @pytest.mark.asyncio - async def test_set_generic_token_with_all_params(self, mock_token_manager): - """Test setting generic token with all parameters.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch("mcp_cli.commands.actions.token.output"): - params = TokenSetParams( - name="my-custom-token", - value="token-value", - token_type="bearer", - namespace="custom", - ) - await token_set_action_async(params) diff --git a/tests/auth/test_token_actions_extended.py b/tests/auth/test_token_actions_extended.py deleted file mode 100644 index 
ee2df618..00000000 --- a/tests/auth/test_token_actions_extended.py +++ /dev/null @@ -1,457 +0,0 @@ -"""Extended tests for token actions to reach 90%+ coverage.""" - -import pytest -from unittest.mock import patch, MagicMock - -from mcp_cli.commands.actions.token import ( - token_set_action_async, - token_get_action_async, - token_delete_action_async, - token_set_provider_action_async, - token_get_provider_action_async, - token_delete_provider_action_async, - token_clear_action_async, - token_backends_action_async, -) -from mcp_cli.commands.models import ( - TokenSetParams, - TokenDeleteParams, - TokenProviderParams, - TokenClearParams, -) - - -class TestTokenSetExtended: - """Extended tests for token_set_action_async.""" - - @pytest.fixture - def mock_token_manager(self, tmp_path): - """Mock TokenManager.""" - from mcp_cli.auth import TokenManager, TokenStoreBackend - - manager = TokenManager( - token_dir=tmp_path / "tokens", - backend=TokenStoreBackend.ENCRYPTED_FILE, - password="test-password", - service_name="mcp-cli", - ) - return manager - - @pytest.mark.asyncio - async def test_set_bearer_token_with_expiration(self, mock_token_manager): - """Test setting bearer token that has expires_at.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch("mcp_cli.commands.actions.token.output"): - # BearerToken with expires_in will have expires_at - params = TokenSetParams( - name="bearer-with-exp", - value="bearer-value", - token_type="bearer", - namespace="test", - ) - await token_set_action_async(params) - - -class TestTokenGetExtended: - """Extended tests for token_get_action_async.""" - - @pytest.fixture - def mock_token_manager(self, tmp_path): - """Mock TokenManager.""" - from mcp_cli.auth import TokenManager, TokenStoreBackend - - manager = TokenManager( - token_dir=tmp_path / "tokens", - backend=TokenStoreBackend.ENCRYPTED_FILE, - password="test-password", - service_name="mcp-cli", - ) - return 
manager - - @pytest.mark.asyncio - async def test_get_token_parsing_error(self, mock_token_manager): - """Test when token data can't be parsed.""" - # Mock store to return invalid JSON - mock_token_manager.token_store._retrieve_raw = MagicMock( - return_value="invalid json" - ) - - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_get_action_async("test-token", "test") - # Should show warning about parsing - assert any( - "Could not parse" in str(call) - for call in mock_output.warning.call_args_list - ) - - -class TestTokenDeleteExtended: - """Extended tests for token_delete_action_async.""" - - @pytest.fixture - def mock_token_manager(self, tmp_path): - """Mock TokenManager.""" - from mcp_cli.auth import TokenManager, TokenStoreBackend - - manager = TokenManager( - token_dir=tmp_path / "tokens", - backend=TokenStoreBackend.ENCRYPTED_FILE, - password="test-password", - service_name="mcp-cli", - ) - return manager - - @pytest.mark.asyncio - async def test_delete_oauth_token_found(self, mock_token_manager): - """Test deleting OAuth token that exists.""" - mock_token_manager.delete_tokens = MagicMock(return_value=True) - - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - params = TokenDeleteParams(name="server-name", oauth=True) - await token_delete_action_async(params) - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_delete_oauth_token_not_found(self, mock_token_manager): - """Test deleting OAuth token that doesn't exist.""" - mock_token_manager.delete_tokens = MagicMock(return_value=False) - - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - 
params = TokenDeleteParams(name="server-name", oauth=True) - await token_delete_action_async(params) - mock_output.warning.assert_called() - - -class TestTokenProviderSetExtended: - """Extended tests for token_set_provider_action_async.""" - - @pytest.fixture - def mock_token_manager(self, tmp_path): - """Mock TokenManager.""" - from mcp_cli.auth import TokenManager, TokenStoreBackend - - manager = TokenManager( - token_dir=tmp_path / "tokens", - backend=TokenStoreBackend.ENCRYPTED_FILE, - password="test-password", - service_name="mcp-cli", - ) - return manager - - @pytest.mark.asyncio - async def test_set_provider_with_getpass(self, mock_token_manager): - """Test setting provider token using getpass.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch( - "mcp_cli.auth.provider_tokens.set_provider_token", return_value=True - ): - with patch( - "mcp_cli.auth.provider_tokens.get_provider_env_var_name", - return_value="OPENAI_API_KEY", - ): - with patch("getpass.getpass", return_value="secret-key"): - with patch("mcp_cli.commands.actions.token.output"): - params = TokenProviderParams(provider="openai") - await token_set_provider_action_async(params) - - @pytest.mark.asyncio - async def test_set_provider_empty_api_key(self, mock_token_manager): - """Test setting provider token with empty API key.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch("getpass.getpass", return_value=""): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - params = TokenProviderParams(provider="openai") - await token_set_provider_action_async(params) - mock_output.error.assert_called() - - @pytest.mark.asyncio - async def test_set_provider_with_env_var_set(self, mock_token_manager): - """Test setting provider token when env var is also set.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - 
return_value=mock_token_manager, - ): - with patch( - "mcp_cli.auth.provider_tokens.set_provider_token", return_value=True - ): - with patch( - "mcp_cli.auth.provider_tokens.get_provider_env_var_name", - return_value="OPENAI_API_KEY", - ): - with patch("os.environ.get", return_value="env-value"): - with patch( - "mcp_cli.commands.actions.token.output" - ) as mock_output: - params = TokenProviderParams( - provider="openai", api_key="test-key" - ) - await token_set_provider_action_async(params) - # Should show warning about env var precedence - assert mock_output.warning.called - - @pytest.mark.asyncio - async def test_set_provider_failure(self, mock_token_manager): - """Test when setting provider token fails.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch( - "mcp_cli.auth.provider_tokens.set_provider_token", return_value=False - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - params = TokenProviderParams(provider="openai", api_key="test-key") - await token_set_provider_action_async(params) - mock_output.error.assert_called() - - -class TestTokenProviderGetExtended: - """Extended tests for token_get_provider_action_async.""" - - @pytest.fixture - def mock_token_manager(self, tmp_path): - """Mock TokenManager.""" - from mcp_cli.auth import TokenManager, TokenStoreBackend - - manager = TokenManager( - token_dir=tmp_path / "tokens", - backend=TokenStoreBackend.ENCRYPTED_FILE, - password="test-password", - service_name="mcp-cli", - ) - return manager - - @pytest.mark.asyncio - async def test_get_provider_with_token(self, mock_token_manager): - """Test getting provider info when token exists.""" - mock_status = { - "has_token": True, - "source": "storage", - "env_var": "OPENAI_API_KEY", - "in_env": False, - "in_storage": True, - } - - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch( - 
"mcp_cli.auth.provider_tokens.check_provider_token_status", - return_value=mock_status, - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - params = TokenProviderParams(provider="openai") - await token_get_provider_action_async(params) - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_get_provider_without_token(self, mock_token_manager): - """Test getting provider info when no token exists.""" - mock_status = { - "has_token": False, - "source": None, - "env_var": "OPENAI_API_KEY", - "in_env": False, - "in_storage": False, - } - - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch( - "mcp_cli.auth.provider_tokens.check_provider_token_status", - return_value=mock_status, - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - params = TokenProviderParams(provider="openai") - await token_get_provider_action_async(params) - # Should show instructions on how to set - assert any( - "To set API key" in str(call) - for call in mock_output.info.call_args_list - ) - - -class TestTokenProviderDeleteExtended: - """Extended tests for token_delete_provider_action_async.""" - - @pytest.fixture - def mock_token_manager(self, tmp_path): - """Mock TokenManager.""" - from mcp_cli.auth import TokenManager, TokenStoreBackend - - manager = TokenManager( - token_dir=tmp_path / "tokens", - backend=TokenStoreBackend.ENCRYPTED_FILE, - password="test-password", - service_name="mcp-cli", - ) - return manager - - @pytest.mark.asyncio - async def test_delete_provider_with_token(self, mock_token_manager): - """Test delete provider display when token exists.""" - mock_status = { - "has_token": True, - "source": "storage", - "env_var": "OPENAI_API_KEY", - "in_env": False, - "in_storage": True, - } - - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch( - 
"mcp_cli.auth.provider_tokens.check_provider_token_status", - return_value=mock_status, - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - params = TokenProviderParams(provider="openai") - await token_delete_provider_action_async(params) - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_delete_provider_without_token(self, mock_token_manager): - """Test delete provider display when no token exists.""" - mock_status = { - "has_token": False, - "source": None, - "env_var": "OPENAI_API_KEY", - "in_env": False, - "in_storage": False, - } - - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch( - "mcp_cli.auth.provider_tokens.check_provider_token_status", - return_value=mock_status, - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - params = TokenProviderParams(provider="openai") - await token_delete_provider_action_async(params) - # Should show instructions - assert any( - "To set API key" in str(call) - for call in mock_output.info.call_args_list - ) - - -class TestTokenClearExtended: - """Extended tests for token_clear_action_async.""" - - @pytest.fixture - def mock_token_manager(self, tmp_path): - """Mock TokenManager.""" - from mcp_cli.auth import TokenManager, TokenStoreBackend - - manager = TokenManager( - token_dir=tmp_path / "tokens", - backend=TokenStoreBackend.ENCRYPTED_FILE, - password="test-password", - service_name="mcp-cli", - ) - return manager - - @pytest.mark.asyncio - async def test_clear_with_no_tokens(self, mock_token_manager): - """Test clearing when no tokens exist.""" - mock_token_manager.registry.list_tokens = MagicMock(return_value=[]) - - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - params = TokenClearParams(force=True) - await token_clear_action_async(params) - 
mock_output.warning.assert_called() - - @pytest.mark.asyncio - async def test_clear_cancelled(self, mock_token_manager): - """Test clearing when user cancels.""" - mock_token_manager.registry.list_tokens = MagicMock( - return_value=[{"name": "token1", "namespace": "test"}] - ) - - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch("chuk_term.ui.prompts.confirm", return_value=False): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - params = TokenClearParams() - await token_clear_action_async(params) - mock_output.warning.assert_called_with("Cancelled") - - @pytest.mark.asyncio - async def test_clear_no_tokens_deleted(self, mock_token_manager): - """Test clearing when no tokens can be deleted.""" - mock_token_manager.registry.list_tokens = MagicMock( - return_value=[{"name": "token1", "namespace": "test"}] - ) - mock_token_manager.token_store.delete_generic = MagicMock(return_value=False) - - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - return_value=mock_token_manager, - ): - with patch("mcp_cli.commands.actions.token.output") as mock_output: - params = TokenClearParams(force=True) - await token_clear_action_async(params) - # Should show warning - assert any( - "No tokens" in str(call) - for call in mock_output.warning.call_args_list - ) - - -class TestTokenBackends: - """Tests for token_backends_action_async.""" - - @pytest.mark.asyncio - async def test_backends_listing(self): - """Test listing token storage backends.""" - from mcp_cli.auth import TokenStoreBackend - - mock_available = [TokenStoreBackend.ENCRYPTED_FILE, TokenStoreBackend.KEYCHAIN] - mock_detected = TokenStoreBackend.ENCRYPTED_FILE - - with patch( - "mcp_cli.commands.actions.token.TokenStoreFactory.get_available_backends", - return_value=mock_available, - ): - with patch( - "mcp_cli.commands.actions.token.TokenStoreFactory._detect_backend", - return_value=mock_detected, - ): - with 
patch("mcp_cli.commands.actions.token.output") as mock_output: - await token_backends_action_async() - mock_output.print_table.assert_called() diff --git a/tests/chat/guards/__init__.py b/tests/chat/guards/__init__.py new file mode 100644 index 00000000..f95c062e --- /dev/null +++ b/tests/chat/guards/__init__.py @@ -0,0 +1,2 @@ +# tests/chat/guards/__init__.py +"""Tests for chat guards.""" diff --git a/tests/chat/guards/test_ungrounded.py b/tests/chat/guards/test_ungrounded.py new file mode 100644 index 00000000..54404672 --- /dev/null +++ b/tests/chat/guards/test_ungrounded.py @@ -0,0 +1,195 @@ +# tests/chat/guards/test_ungrounded.py +"""Tests for UngroundedGuard.""" + +import pytest + +from chuk_ai_session_manager.guards import ( + EnforcementLevel, + UngroundedGuard, + UngroundedGuardConfig, + ValueBinding, + ValueType, +) +from chuk_tool_processor.guards import GuardVerdict + + +class TestUngroundedGuard: + """Tests for UngroundedGuard.""" + + @pytest.fixture + def guard_warn(self): + """Guard in warn mode.""" + return UngroundedGuard( + config=UngroundedGuardConfig(mode=EnforcementLevel.WARN, grace_calls=1), + get_user_literals=lambda: set(), + get_bindings=lambda: {}, + ) + + @pytest.fixture + def guard_block(self): + """Guard in block mode.""" + return UngroundedGuard( + config=UngroundedGuardConfig(mode=EnforcementLevel.BLOCK, grace_calls=0), + get_user_literals=lambda: set(), + get_bindings=lambda: {}, + ) + + @pytest.fixture + def guard_with_user_literals(self): + """Guard with user literals.""" + return UngroundedGuard( + config=UngroundedGuardConfig(mode=EnforcementLevel.BLOCK, grace_calls=0), + get_user_literals=lambda: {37.0, 18.0, 900.0}, + get_bindings=lambda: {}, + ) + + @pytest.fixture + def guard_with_bindings(self): + """Guard with bindings available.""" + bindings = { + "v1": ValueBinding( + id="v1", + tool_name="sqrt", + args_hash="abc", + raw_value=4.2426, + value_type=ValueType.NUMBER, + ) + } + return UngroundedGuard( + 
config=UngroundedGuardConfig(mode=EnforcementLevel.WARN, grace_calls=0), + get_user_literals=lambda: set(), + get_bindings=lambda: bindings, + ) + + def test_allows_no_numeric_args(self, guard_block): + """Test allows calls without numeric args.""" + result = guard_block.check("search_tools", {"query": "cdf"}) + assert result.verdict == GuardVerdict.ALLOW + + def test_warns_on_ungrounded_in_warn_mode(self, guard_warn): + """Test warns on ungrounded args in warn mode.""" + # First call uses grace + guard_warn.check("sqrt", {"x": 18}) + # Second call should warn + result = guard_warn.check("multiply", {"a": 2, "b": 3}) + assert result.verdict in [GuardVerdict.WARN, GuardVerdict.ALLOW] + + def test_blocks_on_ungrounded_in_block_mode(self, guard_block): + """Test blocks ungrounded args in block mode.""" + result = guard_block.check("sqrt", {"x": 18}) + assert result.blocked is True + + def test_allows_user_literals(self, guard_with_user_literals): + """Test allows user-provided literals.""" + result = guard_with_user_literals.check("multiply", {"a": 37, "b": 18}) + assert result.verdict == GuardVerdict.ALLOW + + def test_blocks_non_user_literals(self, guard_with_user_literals): + """Test blocks non-user literals.""" + result = guard_with_user_literals.check("sqrt", {"x": 99}) + assert result.blocked is True + + def test_warns_when_bindings_available(self, guard_with_bindings): + """Test warns when bindings exist but not used.""" + result = guard_with_bindings.check("multiply", {"a": 2, "b": 3}) + # Should warn that bindings exist + assert result.verdict in [GuardVerdict.WARN, GuardVerdict.ALLOW] + + def test_grace_calls(self): + """Test grace period warns instead of blocking.""" + guard = UngroundedGuard( + config=UngroundedGuardConfig(mode=EnforcementLevel.BLOCK, grace_calls=2), + get_user_literals=lambda: set(), + get_bindings=lambda: {}, + ) + + # First two calls warn (during grace period) + result1 = guard.check("normal_cdf", {"x": 18}) + assert result1.verdict == 
GuardVerdict.WARN + + result2 = guard.check("t_test", {"a": 2, "b": 3}) + assert result2.verdict == GuardVerdict.WARN + + # Third call should block (grace exhausted) + result3 = guard.check("chi_square", {"a": 10, "b": 2}) + assert result3.blocked is True + + def test_reset(self, guard_warn): + """Test reset clears grace counter.""" + guard_warn.check("normal_cdf", {"x": 18}) + guard_warn.reset() + + # Should have grace again (warn not block in warn mode) + result = guard_warn.check("normal_cdf", {"x": 18}) + assert result.verdict == GuardVerdict.WARN + + def test_mode_off_always_allows(self): + """Test OFF mode always allows (line 71).""" + guard = UngroundedGuard( + config=UngroundedGuardConfig(mode=EnforcementLevel.OFF, grace_calls=0), + get_user_literals=lambda: set(), + get_bindings=lambda: {}, + ) + + result = guard.check("sqrt", {"x": 18}) + assert result.verdict == GuardVerdict.ALLOW + + def test_allows_when_references_exist(self): + """Test allows when $vN references exist (line 87).""" + guard = UngroundedGuard( + config=UngroundedGuardConfig(mode=EnforcementLevel.BLOCK, grace_calls=0), + get_user_literals=lambda: set(), + get_bindings=lambda: {}, + ) + + # Arguments with $v1 reference + result = guard.check("normal_cdf", {"x": "$v1"}) + assert result.verdict == GuardVerdict.ALLOW + + def test_skips_tool_name_arg(self): + """Test tool_name argument is skipped (line 146).""" + guard = UngroundedGuard( + config=UngroundedGuardConfig(mode=EnforcementLevel.WARN, grace_calls=0), + get_user_literals=lambda: set(), + get_bindings=lambda: {}, + ) + + # tool_name should be ignored even if numeric + result = guard.check("sqrt", {"tool_name": 123, "x": "$v1"}) + assert result.verdict == GuardVerdict.ALLOW + + def test_skips_bool_values(self): + """Test boolean values are skipped (line 148).""" + guard = UngroundedGuard( + config=UngroundedGuardConfig(mode=EnforcementLevel.WARN, grace_calls=0), + get_user_literals=lambda: set(), + get_bindings=lambda: {}, + ) + + # 
Bool should be skipped even though bool is subclass of int + result = guard.check("tool", {"flag": True, "x": "$v1"}) + assert result.verdict == GuardVerdict.ALLOW + + def test_detects_numeric_strings(self): + """Test detects numeric string values (lines 155-156).""" + guard = UngroundedGuard( + config=UngroundedGuardConfig(mode=EnforcementLevel.WARN, grace_calls=0), + get_user_literals=lambda: set(), + get_bindings=lambda: {}, + ) + + # String that parses as number should be detected + result = guard.check("tool", {"x": "3.14"}) + assert result.verdict == GuardVerdict.WARN + + def test_ignores_non_numeric_strings(self): + """Test ignores non-numeric strings (line 157).""" + guard = UngroundedGuard( + config=UngroundedGuardConfig(mode=EnforcementLevel.WARN, grace_calls=0), + get_user_literals=lambda: set(), + get_bindings=lambda: {}, + ) + + # Non-numeric strings should be ignored + result = guard.check("tool", {"name": "hello"}) + assert result.verdict == GuardVerdict.ALLOW diff --git a/tests/chat/state/__init__.py b/tests/chat/state/__init__.py new file mode 100644 index 00000000..990ef544 --- /dev/null +++ b/tests/chat/state/__init__.py @@ -0,0 +1,2 @@ +# tests/chat/state/__init__.py +"""Tests for chat state management.""" diff --git a/tests/chat/state/test_bindings.py b/tests/chat/state/test_bindings.py new file mode 100644 index 00000000..e86d04c4 --- /dev/null +++ b/tests/chat/state/test_bindings.py @@ -0,0 +1,299 @@ +# tests/chat/state/test_bindings.py +"""Tests for BindingManager.""" + +import pytest + +from chuk_ai_session_manager.guards.bindings import BindingManager +from chuk_ai_session_manager.guards.models import ValueType + + +class TestBindingManager: + """Tests for BindingManager.""" + + @pytest.fixture + def manager(self): + return BindingManager() + + def test_bind_creates_binding(self, manager): + """Test that bind creates a new binding.""" + binding = manager.bind("sqrt", {"x": 18}, 4.2426) + assert binding.id == "v1" + assert binding.tool_name 
== "sqrt" + assert binding.raw_value == 4.2426 + assert binding.value_type == ValueType.NUMBER + + def test_bind_increments_id(self, manager): + """Test that bind increments ID for each binding.""" + b1 = manager.bind("sqrt", {"x": 18}, 4.2426) + b2 = manager.bind("multiply", {"a": 2, "b": 3}, 6) + assert b1.id == "v1" + assert b2.id == "v2" + + def test_bind_with_aliases(self, manager): + """Test binding with aliases.""" + binding = manager.bind("sqrt", {"x": 666}, 25.807, aliases=["sigma_LT"]) + assert "sigma_LT" in binding.aliases + + def test_get_by_id(self, manager): + """Test getting binding by ID.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + binding = manager.get("v1") + assert binding is not None + assert binding.raw_value == 4.2426 + + def test_get_by_alias(self, manager): + """Test getting binding by alias.""" + manager.bind("sqrt", {"x": 666}, 25.807, aliases=["sigma_LT"]) + binding = manager.get("sigma_LT") + assert binding is not None + assert binding.raw_value == 25.807 + + def test_get_nonexistent(self, manager): + """Test getting non-existent binding returns None.""" + binding = manager.get("v99") + assert binding is None + + def test_resolve_references_simple(self, manager): + """Test resolving $vN references in arguments.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + resolved = manager.resolve_references({"x": "$v1"}) + # JSON serialization may return string or float + assert float(resolved["x"]) == pytest.approx(4.2426) + + def test_resolve_references_nested(self, manager): + """Test resolving nested references.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + manager.bind("multiply", {"a": 2, "b": 3}, 6) + resolved = manager.resolve_references({"values": {"a": "$v1", "b": "$v2"}}) + # JSON serialization means we check the structure + assert "values" in resolved + + def test_resolve_references_missing(self, manager): + """Test that missing references are preserved.""" + resolved = manager.resolve_references({"x": "$v99"}) + assert resolved["x"] 
== "$v99" + + def test_mark_used(self, manager): + """Test marking a binding as used.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + manager.mark_used("v1", "normal_cdf") + + binding = manager.get("v1") + assert binding.used is True + assert "normal_cdf" in binding.used_in + + def test_format_for_model_empty(self, manager): + """Test format_for_model with no bindings.""" + formatted = manager.format_for_model() + assert formatted == "" + + def test_format_for_model_with_bindings(self, manager): + """Test format_for_model with bindings.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + manager.bind("multiply", {"a": 2, "b": 3}, 6) + formatted = manager.format_for_model() + assert "$v1" in formatted + assert "$v2" in formatted + + def test_reset(self, manager): + """Test reset clears all bindings.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + manager.reset() + + assert manager.get("v1") is None + assert len(manager.bindings) == 0 + assert manager.next_id == 1 + + def test_len(self, manager): + """Test __len__ returns binding count.""" + assert len(manager) == 0 + manager.bind("sqrt", {"x": 18}, 4.2426) + assert len(manager) == 1 + manager.bind("multiply", {"a": 2, "b": 3}, 6) + assert len(manager) == 2 + + def test_check_references_valid(self, manager): + """Test check_references with valid references.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + valid, missing, resolved = manager.check_references({"x": "$v1"}) + assert valid is True + assert len(missing) == 0 + + def test_check_references_missing(self, manager): + """Test check_references with missing references.""" + valid, missing, resolved = manager.check_references({"x": "$v99"}) + assert valid is False + assert "$v99" in missing or "v99" in missing + + def test_each_bind_creates_new_binding(self, manager): + """Test that each bind creates a new binding (no deduplication at this level).""" + b1 = manager.bind("sqrt", {"x": 18}, 4.2426) + b2 = manager.bind("sqrt", {"x": 18}, 4.2426) + # BindingManager doesn't 
deduplicate - each bind creates new binding + # Deduplication is handled at a higher level (ToolStateManager) + assert b1.id != b2.id + + def test_different_args_different_binding(self, manager): + """Test that different args create different bindings.""" + b1 = manager.bind("sqrt", {"x": 18}, 4.2426) + b2 = manager.bind("sqrt", {"x": 19}, 4.3589) + assert b1.id != b2.id + + # ------------------------------------------------------------------------- + # Additional coverage tests for uncovered lines + # ------------------------------------------------------------------------- + + def test_add_alias_success(self, manager): + """Test add_alias to existing binding.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + result = manager.add_alias("v1", "sigma") + assert result is True + # Can now get by alias + binding = manager.get("sigma") + assert binding is not None + assert binding.id == "v1" + + def test_add_alias_to_nonexistent_binding(self, manager): + """Test add_alias returns False for non-existent binding.""" + result = manager.add_alias("v99", "sigma") + assert result is False + + def test_add_alias_duplicate(self, manager): + """Test adding same alias twice.""" + manager.bind("sqrt", {"x": 18}, 4.2426, aliases=["sigma"]) + result = manager.add_alias("v1", "sigma") + assert result is True + # Alias should not be duplicated + binding = manager.get("v1") + assert binding.aliases.count("sigma") == 1 + + def test_get_numeric_values(self, manager): + """Test get_numeric_values returns all numeric values.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + manager.bind("multiply", {"a": 2, "b": 3}, 6) + manager.bind("echo", {"msg": "hello"}, "hello") # non-numeric + values = manager.get_numeric_values() + assert 4.2426 in values + assert 6.0 in values + assert len(values) == 2 + + def test_get_numeric_values_empty(self, manager): + """Test get_numeric_values with no bindings.""" + values = manager.get_numeric_values() + assert values == set() + + def 
test_resolve_references_with_non_numeric_value(self, manager): + """Test resolve_references with string value.""" + manager.bind("echo", {"msg": "test"}, "hello world") + resolved = manager.resolve_references({"msg": "$v1"}) + # String values get JSON serialized - when it fails to resolve, + # the original reference is preserved + # When it succeeds, value is inserted + assert "msg" in resolved + + def test_find_by_value_exact_match(self, manager): + """Test find_by_value with exact match.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + binding = manager.find_by_value(4.2426) + assert binding is not None + assert binding.raw_value == 4.2426 + + def test_find_by_value_tolerance_match(self, manager): + """Test find_by_value with tolerance match.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + binding = manager.find_by_value(4.24261, tolerance=0.001) + assert binding is not None + + def test_find_by_value_no_match(self, manager): + """Test find_by_value with no match.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + binding = manager.find_by_value(999.999) + assert binding is None + + def test_find_by_value_non_numeric_bindings(self, manager): + """Test find_by_value skips non-numeric bindings.""" + manager.bind("echo", {"msg": "test"}, "hello") + binding = manager.find_by_value(4.2426) + assert binding is None + + def test_find_by_value_with_small_values(self, manager): + """Test find_by_value with very small values (avoid division issues).""" + manager.bind("divide", {"a": 1, "b": 1000000}, 0.000001) + binding = manager.find_by_value(0.000001) + assert binding is not None + + def test_get_unused(self, manager): + """Test get_unused returns unused bindings.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + manager.bind("multiply", {"a": 2, "b": 3}, 6) + manager.mark_used("v1", "normal_cdf") + + unused = manager.get_unused() + assert len(unused) == 1 + assert unused[0].id == "v2" + + def test_get_unused_all_used(self, manager): + """Test get_unused when all bindings are 
used.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + manager.mark_used("v1", "normal_cdf") + + unused = manager.get_unused() + assert len(unused) == 0 + + def test_format_unused_warning_with_unused(self, manager): + """Test format_unused_warning with unused bindings.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + manager.bind("multiply", {"a": 2, "b": 3}, 6) + + warning = manager.format_unused_warning() + assert "$v1" in warning + assert "$v2" in warning + + def test_format_unused_warning_none_unused(self, manager): + """Test format_unused_warning when all used.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + manager.mark_used("v1", "normal_cdf") + + warning = manager.format_unused_warning() + assert warning == "" + + def test_bool_empty(self, manager): + """Test __bool__ returns False when empty.""" + assert bool(manager) is False + + def test_bool_with_bindings(self, manager): + """Test __bool__ returns True with bindings.""" + manager.bind("sqrt", {"x": 18}, 4.2426) + assert bool(manager) is True + + +class TestClassifyValueType: + """Tests for classify_value_type function.""" + + def test_classify_list_type(self): + """Test classifying list values.""" + from chuk_ai_session_manager.guards.bindings import classify_value_type + from chuk_ai_session_manager.guards.models import ValueType + + assert classify_value_type([1, 2, 3]) == ValueType.LIST + + def test_classify_dict_type(self): + """Test classifying dict values.""" + from chuk_ai_session_manager.guards.bindings import classify_value_type + from chuk_ai_session_manager.guards.models import ValueType + + assert classify_value_type({"key": "value"}) == ValueType.OBJECT + + def test_classify_unknown_type(self): + """Test classifying unknown type (None, etc).""" + from chuk_ai_session_manager.guards.bindings import classify_value_type + from chuk_ai_session_manager.guards.models import ValueType + + assert classify_value_type(None) == ValueType.UNKNOWN + + def test_classify_numeric_string(self): + """Test 
classifying numeric string.""" + from chuk_ai_session_manager.guards.bindings import classify_value_type + from chuk_ai_session_manager.guards.models import ValueType + + assert classify_value_type("123.45") == ValueType.NUMBER diff --git a/tests/chat/state/test_cache.py b/tests/chat/state/test_cache.py new file mode 100644 index 00000000..94d2fcb9 --- /dev/null +++ b/tests/chat/state/test_cache.py @@ -0,0 +1,155 @@ +# tests/chat/state/test_cache.py +"""Tests for ResultCache.""" + +import pytest + +from chuk_ai_session_manager.guards.cache import ResultCache + + +class TestResultCache: + """Tests for ResultCache.""" + + @pytest.fixture + def cache(self): + return ResultCache() + + def test_put_and_get(self, cache): + """Test caching and retrieving a result.""" + cache.put("sqrt", {"x": 18}, 4.2426) + cached = cache.get("sqrt", {"x": 18}) + assert cached is not None + assert cached.result == 4.2426 + + def test_cache_miss(self, cache): + """Test cache miss returns None.""" + cached = cache.get("sqrt", {"x": 18}) + assert cached is None + + def test_duplicate_increments_count(self, cache): + """Test that duplicate calls increment count.""" + cache.put("sqrt", {"x": 18}, 4.2426) + cached1 = cache.get("sqrt", {"x": 18}) + assert cached1.call_count == 2 # put + get + + cached2 = cache.get("sqrt", {"x": 18}) + assert cached2.call_count == 3 + + def test_duplicate_count_tracking(self, cache): + """Test duplicate count is tracked.""" + cache.put("sqrt", {"x": 18}, 4.2426) + assert cache.duplicate_count == 0 + + cache.get("sqrt", {"x": 18}) + assert cache.duplicate_count == 1 + + def test_store_variable(self, cache): + """Test storing named variables.""" + var = cache.store_variable("sigma", 5.5, units="units/day") + assert var.name == "sigma" + assert var.value == 5.5 + assert var.units == "units/day" + + def test_get_variable(self, cache): + """Test retrieving stored variables.""" + cache.store_variable("sigma", 5.5) + var = cache.get_variable("sigma") + assert var is not 
None + assert var.value == 5.5 + + def test_get_variable_missing(self, cache): + """Test getting non-existent variable returns None.""" + var = cache.get_variable("nonexistent") + assert var is None + + def test_format_state_empty(self, cache): + """Test format_state with empty cache.""" + state = cache.format_state() + assert state == "" + + def test_format_state_with_results(self, cache): + """Test format_state with cached results.""" + cache.put("sqrt", {"x": 18}, 4.2426) + cache.put("multiply", {"a": 2, "b": 3}, 6) + state = cache.format_state() + assert "sqrt" in state or "multiply" in state + + def test_reset(self, cache): + """Test reset clears all state.""" + cache.put("sqrt", {"x": 18}, 4.2426) + cache.store_variable("sigma", 5.5) + cache.reset() + + assert cache.get("sqrt", {"x": 18}) is None + assert cache.get_variable("sigma") is None + assert cache.duplicate_count == 0 + + def test_eviction(self): + """Test LRU eviction when cache is full.""" + cache = ResultCache(max_size=3) + cache.put("tool1", {"x": 1}, 1) + cache.put("tool2", {"x": 2}, 2) + cache.put("tool3", {"x": 3}, 3) + cache.put("tool4", {"x": 4}, 4) + + # First entry should be evicted + assert cache.get("tool1", {"x": 1}) is None + assert cache.get("tool4", {"x": 4}) is not None + + def test_get_stats(self, cache): + """Test get_stats returns correct info.""" + cache.put("sqrt", {"x": 18}, 4.2426) + cache.store_variable("sigma", 5.5) + + stats = cache.get_stats() + assert stats["total_cached"] == 1 + assert stats["total_variables"] == 1 + + def test_format_duplicate_message(self, cache): + """Test format_duplicate_message.""" + cache.put("sqrt", {"x": 18}, 4.2426) + msg = cache.format_duplicate_message("sqrt", {"x": 18}) + assert "sqrt" in msg + assert "4.2426" in msg or "cached" in msg.lower() + + # ------------------------------------------------------------------------- + # Additional coverage tests for uncovered lines + # 
------------------------------------------------------------------------- + + def test_format_duplicate_message_no_cache(self, cache): + """Test format_duplicate_message when not in cache.""" + msg = cache.format_duplicate_message("sqrt", {"x": 99}) + assert "sqrt" in msg + assert "no cached result" in msg.lower() + + def test_format_state_with_variables_only(self, cache): + """Test format_state with only variables (no tool results).""" + cache.store_variable("sigma", 5.5, units="units/day") + state = cache.format_state() + assert "sigma" in state + assert "Stored Variables" in state + + def test_format_state_variables_and_results(self, cache): + """Test format_state with both variables and results.""" + cache.store_variable("sigma", 5.5) + cache.put("sqrt", {"x": 18}, 4.2426) + state = cache.format_state() + assert "sigma" in state + assert "sqrt" in state + + def test_format_state_max_items_limit(self, cache): + """Test format_state respects max_items.""" + # Add multiple variables + for i in range(15): + cache.store_variable(f"var{i}", float(i)) + state = cache.format_state(max_items=5) + # Should only show 5 variables + assert state.count("var") <= 10 # Some slack for formatting + + def test_format_state_separator_between_sections(self, cache): + """Test format_state adds separator between variable and result sections.""" + cache.store_variable("sigma", 5.5) + cache.put("sqrt", {"x": 18}, 4.2426) + state = cache.format_state() + # Should have both sections + assert "Stored Variables" in state + assert "Computed Values" in state diff --git a/tests/chat/state/test_manager.py b/tests/chat/state/test_manager.py new file mode 100644 index 00000000..c4b64cf8 --- /dev/null +++ b/tests/chat/state/test_manager.py @@ -0,0 +1,842 @@ +# tests/chat/state/test_manager.py +"""Tests for ToolStateManager.""" + +import pytest + +from chuk_ai_session_manager.guards.manager import ( + ToolStateManager, + get_tool_state, + reset_tool_state, +) +from 
chuk_ai_session_manager.guards.models import ( + EnforcementLevel, + RuntimeLimits, + RuntimeMode, +) +from chuk_tool_processor.guards import GuardVerdict + + +class TestToolStateManager: + """Tests for ToolStateManager coordinator.""" + + @pytest.fixture + def manager(self): + return ToolStateManager() + + def test_bind_value(self, manager): + """Test binding a value.""" + binding = manager.bind_value("sqrt", {"x": 18}, 4.2426) + assert binding.id == "v1" + assert binding.raw_value == 4.2426 + + def test_get_binding(self, manager): + """Test getting a binding.""" + manager.bind_value("sqrt", {"x": 18}, 4.2426) + binding = manager.get_binding("v1") + assert binding is not None + assert binding.raw_value == 4.2426 + + def test_resolve_references(self, manager): + """Test resolving references.""" + manager.bind_value("sqrt", {"x": 18}, 4.2426) + resolved = manager.resolve_references({"x": "$v1"}) + # JSON serialization may return string or float + assert float(resolved["x"]) == pytest.approx(4.2426) + + def test_cache_result(self, manager): + """Test caching results.""" + cached = manager.cache_result("sqrt", {"x": 18}, 4.2426) + assert cached.tool_name == "sqrt" + assert cached.result == 4.2426 + + def test_get_cached_result(self, manager): + """Test retrieving cached results.""" + manager.cache_result("sqrt", {"x": 18}, 4.2426) + cached = manager.get_cached_result("sqrt", {"x": 18}) + assert cached is not None + assert cached.result == 4.2426 + + def test_store_variable(self, manager): + """Test storing variables.""" + var = manager.store_variable("sigma", 5.5, units="units/day") + assert var.name == "sigma" + assert var.value == 5.5 + + +class TestToolClassification: + """Tests for tool classification.""" + + @pytest.fixture + def manager(self): + return ToolStateManager() + + def test_is_discovery_tool_search(self, manager): + """Test search_tools is classified as discovery.""" + assert manager.is_discovery_tool("search_tools") is True + + def 
test_is_discovery_tool_list(self, manager): + """Test list_tools is classified as discovery.""" + assert manager.is_discovery_tool("list_tools") is True + + def test_is_discovery_tool_schema(self, manager): + """Test get_tool_schema is classified as discovery.""" + assert manager.is_discovery_tool("get_tool_schema") is True + + def test_is_execution_tool(self, manager): + """Test execution tools are not discovery.""" + assert manager.is_execution_tool("sqrt") is True + assert manager.is_execution_tool("normal_cdf") is True + + def test_is_idempotent_math_tool(self, manager): + """Test idempotent math tool classification.""" + assert manager.is_idempotent_math_tool("sqrt") is True + assert manager.is_idempotent_math_tool("multiply") is True + # CDF functions are parameterized, not idempotent math + assert manager.is_idempotent_math_tool("normal_cdf") is False + assert manager.is_parameterized_tool("normal_cdf") is True + + +class TestGuardChecks: + """Tests for guard integration.""" + + @pytest.fixture + def manager(self): + return ToolStateManager() + + def test_check_all_guards_allows_valid(self, manager): + """Test that valid calls are allowed.""" + # Bind a value first so precondition passes + manager.bind_value("sqrt", {"x": 18}, 4.2426) + result = manager.check_all_guards("multiply", {"a": 2, "b": 3}) + assert result.verdict == GuardVerdict.ALLOW + + def test_check_preconditions_blocks_premature(self, manager): + """Test precondition blocks parameterized tools without values.""" + allowed, error = manager.check_preconditions("normal_cdf", {"x": 1.5}) + assert allowed is False + assert error is not None + + def test_check_preconditions_allows_after_values(self, manager): + """Test precondition allows after values computed.""" + manager.bind_value("sqrt", {"x": 18}, 4.2426) + allowed, error = manager.check_preconditions("normal_cdf", {"x": 4.2426}) + assert allowed is True + + +class TestRunawayDetection: + """Tests for runaway detection.""" + + @pytest.fixture 
+ def manager(self): + return ToolStateManager() + + def test_check_runaway_under_budget(self, manager): + """Test no runaway under budget.""" + status = manager.check_runaway() + assert status.should_stop is False + + def test_record_numeric_result(self, manager): + """Test recording numeric results.""" + manager.record_numeric_result(4.2426) + assert 4.2426 in manager._recent_numeric_results + + +class TestConfiguration: + """Tests for configuration.""" + + def test_configure_limits(self): + """Test configuring runtime limits.""" + manager = ToolStateManager() + limits = RuntimeLimits(tool_budget_total=20, execution_budget=15) + manager.configure(limits) + assert manager.limits.tool_budget_total == 20 + + def test_set_mode_smooth(self): + """Test setting smooth mode.""" + manager = ToolStateManager() + manager.set_mode(RuntimeMode.SMOOTH) + assert manager.limits.require_bindings == EnforcementLevel.WARN + + def test_set_mode_strict(self): + """Test setting strict mode.""" + manager = ToolStateManager() + manager.set_mode(RuntimeMode.STRICT) + assert manager.limits.require_bindings == EnforcementLevel.BLOCK + + +class TestUserLiterals: + """Tests for user literal registration.""" + + @pytest.fixture + def manager(self): + return ToolStateManager() + + def test_register_user_literals(self, manager): + """Test extracting literals from user text.""" + count = manager.register_user_literals("I sell 37 units per day") + assert count > 0 + assert 37.0 in manager.user_literals + + def test_register_multiple_literals(self, manager): + """Test extracting multiple literals.""" + manager.register_user_literals("Lead time is 18 days, I have 900 units") + assert 18.0 in manager.user_literals + assert 900.0 in manager.user_literals + + +class TestLifecycle: + """Tests for lifecycle management.""" + + def test_reset_for_new_prompt(self): + """Test reset_for_new_prompt clears per-prompt state.""" + manager = ToolStateManager() + manager.bind_value("sqrt", {"x": 18}, 4.2426) + 
manager.register_user_literals("37 units") + + manager.reset_for_new_prompt() + + assert len(manager.bindings) == 0 + assert len(manager.user_literals) == 0 + + def test_clear(self): + """Test clear removes all state.""" + manager = ToolStateManager() + manager.bind_value("sqrt", {"x": 18}, 4.2426) + manager.cache_result("sqrt", {"x": 18}, 4.2426) + + manager.clear() + + assert manager.get_binding("v1") is None + assert manager.get_cached_result("sqrt", {"x": 18}) is None + + +class TestGlobalState: + """Tests for global state functions.""" + + def test_get_tool_state_singleton(self): + """Test get_tool_state returns singleton.""" + reset_tool_state() + state1 = get_tool_state() + state2 = get_tool_state() + assert state1 is state2 + + def test_reset_tool_state(self): + """Test reset_tool_state creates new instance.""" + state1 = get_tool_state() + state1.bind_value("sqrt", {"x": 18}, 4.2426) + + reset_tool_state() + state2 = get_tool_state() + + assert state2.get_binding("v1") is None + + +# ============================================================================= +# Additional coverage tests for manager.py +# ============================================================================= + + +class TestCheckAllGuards: + """Tests for check_all_guards method.""" + + @pytest.fixture + def manager(self): + return ToolStateManager() + + def test_check_all_guards_warns_on_non_blocking(self, manager): + """Test warnings are logged but don't block.""" + # Bind values first + manager.bind_value("sqrt", {"x": 18}, 4.2426) + result = manager.check_all_guards("add", {"a": 1, "b": 2}) + assert result.verdict == GuardVerdict.ALLOW + + def test_check_all_guards_skips_none_guards(self, manager): + """Test that None guards are skipped.""" + # Set a guard to None and verify no error + manager.precondition_guard = None + result = manager.check_all_guards("sqrt", {"x": 18}) + # Should still work (other guards run) + assert result is not None + + +class TestCheckReferences: + 
"""Tests for check_references method.""" + + @pytest.fixture + def manager(self): + return ToolStateManager() + + def test_check_references_nested_dict(self, manager): + """Test check_references with nested dict.""" + manager.bind_value("sqrt", {"x": 18}, 4.2426) + result = manager.check_references({"nested": {"value": "$v1"}}) + assert result.valid is True + assert "$v1" in result.resolved_refs + + def test_check_references_nested_list(self, manager): + """Test check_references with nested list.""" + manager.bind_value("sqrt", {"x": 18}, 4.2426) + result = manager.check_references({"values": ["$v1", "$v1"]}) + assert result.valid is True + + def test_check_references_missing_ref(self, manager): + """Test check_references with missing reference.""" + result = manager.check_references({"value": "$v99"}) + assert result.valid is False + assert "$v99" in result.missing_refs + assert "Missing references" in result.message + + +class TestBudgetTracking: + """Tests for budget tracking methods.""" + + @pytest.fixture + def manager(self): + return ToolStateManager() + + def test_record_tool_call(self, manager): + """Test record_tool_call updates guards.""" + manager.record_tool_call("sqrt") + status = manager.get_budget_status() + assert status["execution"]["used"] > 0 or status["discovery"]["used"] > 0 + + def test_get_budget_status_no_guard(self, manager): + """Test get_budget_status when guard is None.""" + manager.budget_guard = None + status = manager.get_budget_status() + assert status["total"]["used"] == 0 + + def test_set_budget(self, manager): + """Test set_budget updates limits.""" + manager.set_budget(50) + assert manager.limits.tool_budget_total == 50 + + def test_get_discovery_status(self, manager): + """Test get_discovery_status.""" + status = manager.get_discovery_status() + assert "used" in status + assert "limit" in status + + def test_get_discovery_status_no_guard(self, manager): + """Test get_discovery_status when guard is None.""" + manager.budget_guard 
= None + status = manager.get_discovery_status() + assert status["used"] == 0 + + def test_get_execution_status(self, manager): + """Test get_execution_status.""" + status = manager.get_execution_status() + assert "used" in status + assert "limit" in status + + def test_get_execution_status_no_guard(self, manager): + """Test get_execution_status when guard is None.""" + manager.budget_guard = None + status = manager.get_execution_status() + assert status["used"] == 0 + + def test_is_discovery_exhausted(self, manager): + """Test is_discovery_exhausted.""" + # Initially not exhausted + assert manager.is_discovery_exhausted() is False + + def test_is_execution_exhausted(self, manager): + """Test is_execution_exhausted.""" + # Initially not exhausted + assert manager.is_execution_exhausted() is False + + def test_increment_discovery_call(self, manager): + """Test increment_discovery_call.""" + initial = manager.get_discovery_status()["used"] + manager.increment_discovery_call() + after = manager.get_discovery_status()["used"] + assert after > initial + + def test_increment_discovery_call_no_guard(self, manager): + """Test increment_discovery_call when guard is None.""" + manager.budget_guard = None + # Should not raise + manager.increment_discovery_call() + + def test_increment_execution_call(self, manager): + """Test increment_execution_call.""" + initial = manager.get_execution_status()["used"] + manager.increment_execution_call() + after = manager.get_execution_status()["used"] + assert after > initial + + def test_increment_execution_call_no_guard(self, manager): + """Test increment_execution_call when guard is None.""" + manager.budget_guard = None + # Should not raise + manager.increment_execution_call() + + def test_get_discovered_tools(self, manager): + """Test get_discovered_tools.""" + manager.register_discovered_tool("sqrt") + tools = manager.get_discovered_tools() + assert "sqrt" in tools + + def test_get_discovered_tools_no_guard(self, manager): + """Test 
get_discovered_tools when guard is None.""" + manager.budget_guard = None + tools = manager.get_discovered_tools() + assert tools == set() + + def test_is_tool_discovered(self, manager): + """Test is_tool_discovered.""" + manager.register_discovered_tool("sqrt") + assert manager.is_tool_discovered("sqrt") is True + assert manager.is_tool_discovered("nonexistent") is False + + def test_register_discovered_tool_no_guard(self, manager): + """Test register_discovered_tool when guard is None.""" + manager.budget_guard = None + # Should not raise + manager.register_discovered_tool("sqrt") + + +class TestNumericResultTracking: + """Tests for numeric result tracking.""" + + @pytest.fixture + def manager(self): + return ToolStateManager() + + def test_recent_numeric_results_no_guard(self, manager): + """Test _recent_numeric_results when guard is None.""" + manager.runaway_guard = None + results = manager._recent_numeric_results + assert results == [] + + +class TestUngroundedCallDetection: + """Tests for ungrounded call detection.""" + + @pytest.fixture + def manager(self): + return ToolStateManager() + + def test_check_ungrounded_call_no_guard(self, manager): + """Test check_ungrounded_call when guard is None.""" + manager.ungrounded_guard = None + result = manager.check_ungrounded_call("sqrt", {"x": 5}) + assert result.is_ungrounded is False + + def test_check_ungrounded_call_with_user_literal(self, manager): + """Test check_ungrounded_call with user-provided literal.""" + manager.register_user_literals("I want to compute sqrt of 5") + result = manager.check_ungrounded_call("sqrt", {"x": 5}) + # User literal should be allowed + assert result.is_ungrounded is False + + def test_check_ungrounded_call_ungrounded(self, manager): + """Test check_ungrounded_call with ungrounded args.""" + # First bind a value so bindings exist + manager.bind_value("sqrt", {"x": 18}, 4.2426) + # Now call with a literal that's not in bindings + result = manager.check_ungrounded_call("add", {"a": 
999, "b": 888}) + # Should be detected as ungrounded (literals not from user or bindings) + assert result.is_ungrounded is True + assert len(result.numeric_args) > 0 + + def test_should_auto_rebound(self, manager): + """Test should_auto_rebound.""" + # Without bindings, should not auto-rebound + assert manager.should_auto_rebound("sqrt") is False + + # With bindings, idempotent math tools should auto-rebound + manager.bind_value("sqrt", {"x": 18}, 4.2426) + assert manager.should_auto_rebound("sqrt") is True + # Non-idempotent tools should not auto-rebound + assert manager.should_auto_rebound("normal_cdf") is False + + +class TestSoftBlockRepair: + """Tests for soft block repair functionality.""" + + @pytest.fixture + def manager(self): + return ToolStateManager() + + def test_try_soft_block_repair_no_bindings(self, manager): + """Test repair fails when no bindings exist.""" + from chuk_ai_session_manager.guards.models import UngroundedCallResult + + result = UngroundedCallResult( + is_ungrounded=True, + numeric_args=["x=5"], + has_bindings=False, + ) + should_proceed, repaired, fallback = manager.try_soft_block_repair( + "sqrt", {"x": 5}, result + ) + assert should_proceed is False + assert repaired is None + + def test_try_soft_block_repair_with_matching_binding(self, manager): + """Test repair succeeds when binding matches.""" + from chuk_ai_session_manager.guards.models import UngroundedCallResult + + manager.bind_value("sqrt", {"x": 18}, 4.2426) + result = UngroundedCallResult( + is_ungrounded=True, + numeric_args=["x=4.2426"], + has_bindings=True, + ) + should_proceed, repaired, fallback = manager.try_soft_block_repair( + "normal_cdf", {"x": 4.2426}, result + ) + assert should_proceed is True + assert repaired is not None + assert "$v1" in str(repaired["x"]) + + def test_try_soft_block_repair_no_matching_binding(self, manager): + """Test repair fails when no binding matches.""" + from chuk_ai_session_manager.guards.models import UngroundedCallResult + + 
manager.bind_value("sqrt", {"x": 18}, 4.2426) + result = UngroundedCallResult( + is_ungrounded=True, + numeric_args=["x=999.999"], + has_bindings=True, + ) + should_proceed, repaired, fallback = manager.try_soft_block_repair( + "normal_cdf", {"x": 999.999}, result + ) + assert should_proceed is False + assert repaired is None + assert fallback is not None + + def test_try_soft_block_repair_with_soft_block_reason(self, manager): + """Test repair with SoftBlockReason enum.""" + from chuk_ai_session_manager.guards.models import SoftBlockReason + + manager.bind_value("sqrt", {"x": 18}, 4.2426) + should_proceed, repaired, fallback = manager.try_soft_block_repair( + "normal_cdf", {"x": 4.2426}, SoftBlockReason.UNGROUNDED_ARGS + ) + assert should_proceed is True + + def test_try_soft_block_repair_unknown_reason(self, manager): + """Test repair with unknown reason returns False.""" + from chuk_ai_session_manager.guards.models import SoftBlockReason + + should_proceed, repaired, fallback = manager.try_soft_block_repair( + "sqrt", {"x": 5}, SoftBlockReason.BUDGET_EXHAUSTED + ) + assert should_proceed is False + + def test_try_soft_block_repair_no_bindings_soft_block_reason(self, manager): + """Test repair with SoftBlockReason when no bindings exist.""" + from chuk_ai_session_manager.guards.models import SoftBlockReason + + should_proceed, repaired, fallback = manager.try_soft_block_repair( + "sqrt", {"x": 5}, SoftBlockReason.UNGROUNDED_ARGS + ) + assert should_proceed is False + assert fallback is not None + assert "compute the required values" in fallback + + +class TestPerToolTracking: + """Tests for per-tool call tracking.""" + + @pytest.fixture + def manager(self): + return ToolStateManager() + + def test_get_tool_call_count(self, manager): + """Test get_tool_call_count.""" + assert manager.get_tool_call_count("sqrt") == 0 + manager.increment_tool_call("sqrt") + assert manager.get_tool_call_count("sqrt") == 1 + + def test_get_tool_call_count_namespaced(self, manager): + 
"""Test get_tool_call_count with namespaced tool.""" + manager.increment_tool_call("math.sqrt") + assert manager.get_tool_call_count("math.sqrt") == 1 + + def test_increment_tool_call(self, manager): + """Test increment_tool_call.""" + manager.increment_tool_call("sqrt") + manager.increment_tool_call("sqrt") + assert manager.get_tool_call_count("sqrt") == 2 + + def test_increment_tool_call_no_guard(self, manager): + """Test increment_tool_call when guard is None.""" + manager.per_tool_guard = None + manager.increment_tool_call("sqrt") + assert manager.get_tool_call_count("sqrt") == 1 + + def test_track_tool_call(self, manager): + """Test track_tool_call returns status.""" + manager.increment_tool_call("sqrt") + status = manager.track_tool_call("sqrt") + assert status.tool_name == "sqrt" + assert status.call_count == 1 + + def test_format_tool_limit_warning(self, manager): + """Test format_tool_limit_warning.""" + manager.increment_tool_call("sqrt") + manager.increment_tool_call("sqrt") + manager.increment_tool_call("sqrt") + warning = manager.format_tool_limit_warning("sqrt") + assert "sqrt" in warning + assert "3" in warning + + def test_check_per_tool_limit(self, manager): + """Test check_per_tool_limit.""" + result = manager.check_per_tool_limit("sqrt") + assert result.verdict == GuardVerdict.ALLOW + + def test_check_per_tool_limit_no_guard(self, manager): + """Test check_per_tool_limit when guard is None.""" + manager.per_tool_guard = None + result = manager.check_per_tool_limit("sqrt") + assert result.verdict == GuardVerdict.ALLOW + + +class TestRunawayDetectionExtended: + """Additional tests for runaway detection.""" + + @pytest.fixture + def manager(self): + return ToolStateManager() + + def test_check_runaway_discovery_exhausted(self, manager): + """Test check_runaway when discovery budget exhausted.""" + # Exhaust discovery budget + for _ in range(manager.limits.discovery_budget + 1): + manager.increment_discovery_call() + status = 
manager.check_runaway("search_tools") + assert status.should_stop is True + assert status.budget_exhausted is True + + def test_check_runaway_execution_exhausted(self, manager): + """Test check_runaway when execution budget exhausted.""" + # Exhaust execution budget + for _ in range(manager.limits.execution_budget + 1): + manager.increment_execution_call() + status = manager.check_runaway("sqrt") + assert status.should_stop is True + assert status.budget_exhausted is True + + def test_check_runaway_total_exhausted(self, manager): + """Test check_runaway when total budget exhausted.""" + # Use up total budget + for _ in range(manager.limits.tool_budget_total + 1): + manager.increment_execution_call() + status = manager.check_runaway() + assert status.should_stop is True + + +class TestClassifyByResult: + """Tests for classify_by_result method.""" + + @pytest.fixture + def manager(self): + return ToolStateManager() + + def test_classify_by_result_list_tools(self, manager): + """Test classify_by_result with list_tools result.""" + result = { + "results": [ + {"name": "sqrt", "description": "Square root"}, + {"name": "add", "description": "Addition"}, + ] + } + manager.classify_by_result("list_tools", result) + assert manager.is_tool_discovered("sqrt") is True + assert manager.is_tool_discovered("add") is True + + def test_classify_by_result_get_tool_schema(self, manager): + """Test classify_by_result with get_tool_schema result.""" + result = { + "function": { + "name": "sqrt", + "description": "Square root", + } + } + manager.classify_by_result("get_tool_schema", result) + assert manager.is_tool_discovered("sqrt") is True + + def test_classify_by_result_non_dict(self, manager): + """Test classify_by_result with non-dict result.""" + # Should not raise + manager.classify_by_result("echo", "hello world") + + +class TestFormatting: + """Tests for formatting methods.""" + + @pytest.fixture + def manager(self): + return ToolStateManager() + + def 
test_format_state_for_model(self, manager): + """Test format_state_for_model.""" + manager.bind_value("sqrt", {"x": 18}, 4.2426) + manager.cache_result("sqrt", {"x": 18}, 4.2426) + state = manager.format_state_for_model() + assert "$v1" in state + + def test_format_budget_status(self, manager): + """Test format_budget_status.""" + status = manager.format_budget_status() + assert "Discovery" in status + assert "Execution" in status + + def test_format_budget_status_no_guard(self, manager): + """Test format_budget_status when guard is None.""" + manager.budget_guard = None + status = manager.format_budget_status() + assert status == "" + + def test_format_bindings_for_model(self, manager): + """Test format_bindings_for_model.""" + manager.bind_value("sqrt", {"x": 18}, 4.2426) + formatted = manager.format_bindings_for_model() + assert "$v1" in formatted + + def test_get_duplicate_count(self, manager): + """Test get_duplicate_count.""" + manager.cache_result("sqrt", {"x": 18}, 4.2426) + manager.get_cached_result("sqrt", {"x": 18}) + assert manager.get_duplicate_count() == 1 + + def test_format_discovery_exhausted_message(self, manager): + """Test format_discovery_exhausted_message.""" + msg = manager.format_discovery_exhausted_message() + assert "Discovery budget exhausted" in msg + + def test_format_execution_exhausted_message(self, manager): + """Test format_execution_exhausted_message.""" + msg = manager.format_execution_exhausted_message() + assert "Execution budget exhausted" in msg + + def test_format_budget_exhausted_message(self, manager): + """Test format_budget_exhausted_message.""" + msg = manager.format_budget_exhausted_message() + assert "Tool budget exhausted" in msg + + def test_format_saturation_message(self, manager): + """Test format_saturation_message.""" + msg = manager.format_saturation_message(0.99999999) + assert "saturation" in msg.lower() + assert "0.99999999" in msg + + def test_format_unused_warning_with_unused(self, manager): + """Test 
format_unused_warning with unused bindings.""" + manager.bind_value("sqrt", {"x": 18}, 4.2426) + manager.bind_value("multiply", {"a": 2, "b": 3}, 6) + warning = manager.format_unused_warning() + assert "$v1" in warning + assert "$v2" in warning + + def test_format_unused_warning_no_unused(self, manager): + """Test format_unused_warning with no unused bindings.""" + warning = manager.format_unused_warning() + assert warning == "" + + def test_format_unused_warning_many_unused(self, manager): + """Test format_unused_warning with many unused bindings.""" + for i in range(10): + manager.bind_value("sqrt", {"x": i}, float(i)) + warning = manager.format_unused_warning() + assert "+5 more" in warning + + +class TestExtractBindingsFromText: + """Tests for extract_bindings_from_text method.""" + + @pytest.fixture + def manager(self): + return ToolStateManager() + + def test_extract_simple_binding(self, manager): + """Test extracting simple variable assignment.""" + text = "The result is sigma = 5.5" + bindings = manager.extract_bindings_from_text(text) + assert len(bindings) >= 1 + # Check that sigma is an alias + binding = manager.get_binding("sigma") + assert binding is not None + assert binding.raw_value == 5.5 + + def test_extract_multiple_bindings(self, manager): + """Test extracting multiple variable assignments.""" + text = "mu = 37.5 and sigma = 5.5" + bindings = manager.extract_bindings_from_text(text) + assert len(bindings) >= 2 + + def test_extract_skips_code_context(self, manager): + """Test that code-like context is skipped.""" + text = "if x == 5 then do something" + bindings = manager.extract_bindings_from_text(text) + # Should not extract from comparison + assert len(bindings) == 0 + + def test_extract_scientific_notation(self, manager): + """Test extracting scientific notation.""" + text = "result = 1.5e-10" + bindings = manager.extract_bindings_from_text(text) + assert len(bindings) >= 1 + + def test_extract_negative_value(self, manager): + """Test 
extracting negative value.""" + text = "delta = -3.14" + bindings = manager.extract_bindings_from_text(text) + assert len(bindings) >= 1 + binding = manager.get_binding("delta") + assert binding is not None + assert binding.raw_value == -3.14 + + +class TestSetModeString: + """Tests for set_mode with string input.""" + + def test_set_mode_smooth_string(self): + """Test setting smooth mode with string.""" + manager = ToolStateManager() + manager.set_mode("smooth") + assert manager.limits.require_bindings == EnforcementLevel.WARN + + def test_set_mode_strict_string(self): + """Test setting strict mode with string.""" + manager = ToolStateManager() + manager.set_mode("strict") + assert manager.limits.require_bindings == EnforcementLevel.BLOCK + + def test_set_mode_uppercase_string(self): + """Test setting mode with uppercase string.""" + manager = ToolStateManager() + manager.set_mode("SMOOTH") + assert manager.limits.require_bindings == EnforcementLevel.WARN + + +class TestCheckToolPreconditions: + """Tests for check_tool_preconditions alias.""" + + @pytest.fixture + def manager(self): + return ToolStateManager() + + def test_check_tool_preconditions_is_alias(self, manager): + """Test that check_tool_preconditions is alias for check_preconditions.""" + # Both should return same result + r1 = manager.check_preconditions("normal_cdf", {"x": 1.5}) + r2 = manager.check_tool_preconditions("normal_cdf", {"x": 1.5}) + assert r1 == r2 + + +class TestPreconditionsNoGuard: + """Tests for precondition checks when guard is None.""" + + def test_check_preconditions_no_guard(self): + """Test check_preconditions when guard is None.""" + manager = ToolStateManager() + manager.precondition_guard = None + allowed, error = manager.check_preconditions("normal_cdf", {"x": 1.5}) + assert allowed is True + assert error is None diff --git a/tests/chat/state/test_models.py b/tests/chat/state/test_models.py new file mode 100644 index 00000000..88806a29 --- /dev/null +++ 
b/tests/chat/state/test_models.py @@ -0,0 +1,680 @@ +# tests/chat/state/test_models.py +"""Tests for state models.""" + +import pytest + +from chuk_ai_session_manager.guards.models import ( + CachedToolResult, + NamedVariable, + PerToolCallStatus, + RepairAction, + RunawayStatus, + RuntimeLimits, + SoftBlock, + SoftBlockReason, + UngroundedCallResult, + ValueBinding, + ValueType, + classify_value_type, + compute_args_hash, +) + + +class TestValueType: + """Tests for ValueType enum and classify_value_type.""" + + def test_classify_int(self): + assert classify_value_type(42) == ValueType.NUMBER + + def test_classify_float(self): + assert classify_value_type(3.14159) == ValueType.NUMBER + + def test_classify_numeric_string(self): + assert classify_value_type("4.2426") == ValueType.NUMBER + + def test_classify_non_numeric_string(self): + assert classify_value_type("hello world") == ValueType.STRING + + def test_classify_list(self): + assert classify_value_type([1, 2, 3]) == ValueType.LIST + + def test_classify_dict(self): + assert classify_value_type({"key": "value"}) == ValueType.OBJECT + + def test_classify_none(self): + assert classify_value_type(None) == ValueType.UNKNOWN + + +class TestComputeArgsHash: + """Tests for compute_args_hash.""" + + def test_consistent_hash(self): + args = {"x": 18, "y": 37} + hash1 = compute_args_hash(args) + hash2 = compute_args_hash(args) + assert hash1 == hash2 + + def test_order_independent(self): + hash1 = compute_args_hash({"a": 1, "b": 2}) + hash2 = compute_args_hash({"b": 2, "a": 1}) + assert hash1 == hash2 + + def test_different_args_different_hash(self): + hash1 = compute_args_hash({"x": 18}) + hash2 = compute_args_hash({"x": 19}) + assert hash1 != hash2 + + +class TestValueBinding: + """Tests for ValueBinding model.""" + + def test_create_binding(self): + binding = ValueBinding( + id="v1", + tool_name="sqrt", + args_hash="abc123", + raw_value=4.2426, + value_type=ValueType.NUMBER, + ) + assert binding.id == "v1" + assert 
binding.tool_name == "sqrt" + assert binding.raw_value == 4.2426 + assert binding.value_type == ValueType.NUMBER + + def test_typed_value_coercion(self): + binding = ValueBinding( + id="v1", + tool_name="sqrt", + args_hash="abc123", + raw_value="4.2426", + value_type=ValueType.NUMBER, + ) + assert binding.typed_value == pytest.approx(4.2426) + + def test_format_for_model_number(self): + binding = ValueBinding( + id="v1", + tool_name="sqrt", + args_hash="abc123", + raw_value=4.2426, + value_type=ValueType.NUMBER, + ) + formatted = binding.format_for_model() + assert "$v1" in formatted + assert "sqrt" in formatted + + +class TestCachedToolResult: + """Tests for CachedToolResult model.""" + + def test_signature_generation(self): + result = CachedToolResult( + tool_name="sqrt", + arguments={"x": 18}, + result=4.242640687119285, + ) + assert result.signature == 'sqrt:{"x": 18}' + + def test_is_numeric_with_float(self): + result = CachedToolResult( + tool_name="sqrt", + arguments={"x": 18}, + result=4.242640687119285, + ) + assert result.is_numeric is True + assert result.numeric_value == pytest.approx(4.242640687119285) + + def test_is_numeric_with_int(self): + result = CachedToolResult( + tool_name="add", + arguments={"a": 1, "b": 2}, + result=3, + ) + assert result.is_numeric is True + assert result.numeric_value == 3.0 + + def test_is_numeric_with_string_number(self): + result = CachedToolResult( + tool_name="sqrt", + arguments={"x": 18}, + result="4.242640687119285", + ) + assert result.is_numeric is True + + def test_is_numeric_with_non_numeric(self): + result = CachedToolResult( + tool_name="echo", + arguments={"msg": "hello"}, + result="hello world", + ) + assert result.is_numeric is False + assert result.numeric_value is None + + def test_format_compact_numeric(self): + result = CachedToolResult( + tool_name="sqrt", + arguments={"x": 18}, + result=4.242640687119285, + ) + formatted = result.format_compact() + assert "sqrt" in formatted + assert "4.24264" in 
formatted + + +class TestNamedVariable: + """Tests for NamedVariable model.""" + + def test_format_with_units(self): + var = NamedVariable(name="sigma", value=5.5, units="units/day") + formatted = var.format_compact() + assert "sigma" in formatted + assert "5.5" in formatted + assert "units/day" in formatted + + def test_format_without_units(self): + var = NamedVariable(name="mu", value=37.0) + formatted = var.format_compact() + assert "mu" in formatted + assert "37" in formatted + + +class TestRunawayStatus: + """Tests for RunawayStatus model.""" + + def test_default_status(self): + status = RunawayStatus() + assert status.should_stop is False + assert status.budget_exhausted is False + assert status.degenerate_detected is False + + def test_budget_exhausted_message(self): + status = RunawayStatus( + should_stop=True, + budget_exhausted=True, + calls_remaining=0, + ) + assert "budget" in status.message.lower() + + def test_degenerate_message(self): + status = RunawayStatus( + should_stop=True, + degenerate_detected=True, + ) + assert "degenerate" in status.message.lower() + + def test_saturation_message(self): + status = RunawayStatus( + should_stop=True, + saturation_detected=True, + ) + assert "saturation" in status.message.lower() + + +class TestSoftBlock: + """Tests for SoftBlock model.""" + + def test_create_soft_block(self): + block = SoftBlock( + reason=SoftBlockReason.UNGROUNDED_ARGS, + tool_name="normal_cdf", + arguments={"x": 1.5}, + ) + assert block.reason == SoftBlockReason.UNGROUNDED_ARGS + assert block.tool_name == "normal_cdf" + + def test_can_repair(self): + block = SoftBlock( + reason=SoftBlockReason.UNGROUNDED_ARGS, + repair_attempts=0, + max_repairs=3, + ) + assert block.can_repair is True + + def test_cannot_repair_exhausted(self): + block = SoftBlock( + reason=SoftBlockReason.UNGROUNDED_ARGS, + repair_attempts=3, + max_repairs=3, + ) + assert block.can_repair is False + + def test_next_repair_action_ungrounded(self): + block = 
SoftBlock(reason=SoftBlockReason.UNGROUNDED_ARGS) + assert block.next_repair_action == RepairAction.REBIND_FROM_EXISTING + + def test_next_repair_action_missing_refs(self): + block = SoftBlock(reason=SoftBlockReason.MISSING_REFS) + assert block.next_repair_action == RepairAction.COMPUTE_MISSING + + +class TestRuntimeLimits: + """Tests for RuntimeLimits model.""" + + def test_default_limits(self): + limits = RuntimeLimits() + assert limits.discovery_budget > 0 + assert limits.execution_budget > 0 + assert limits.tool_budget_total > 0 + + def test_smooth_preset(self): + limits = RuntimeLimits.smooth() + assert limits.require_bindings == "warn" + assert limits.ungrounded_grace_calls > 0 + + def test_strict_preset(self): + limits = RuntimeLimits.strict() + assert limits.require_bindings == "block" + assert limits.ungrounded_grace_calls == 0 + + +class TestUngroundedCallResult: + """Tests for UngroundedCallResult model.""" + + def test_not_ungrounded(self): + result = UngroundedCallResult(is_ungrounded=False) + assert result.is_ungrounded is False + + def test_ungrounded_with_args(self): + result = UngroundedCallResult( + is_ungrounded=True, + numeric_args=["x=1.5", "y=2.5"], + has_bindings=True, + message="Ungrounded numeric arguments detected", + ) + assert result.is_ungrounded is True + assert len(result.numeric_args) == 2 + assert result.has_bindings is True + + +class TestPerToolCallStatus: + """Tests for PerToolCallStatus model.""" + + def test_under_limit(self): + status = PerToolCallStatus( + tool_name="sqrt", + call_count=1, + max_calls=3, + ) + assert status.requires_justification is False + + def test_at_limit(self): + status = PerToolCallStatus( + tool_name="sqrt", + call_count=3, + max_calls=3, + requires_justification=True, + ) + assert status.requires_justification is True + + +# ============================================================================= +# Additional coverage tests for models.py +# 
============================================================================= + + +class TestValueBindingTypedValue: + """Tests for ValueBinding.typed_value edge cases.""" + + def test_typed_value_invalid_conversion(self): + """Test typed_value when conversion fails.""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value="not-a-number", + value_type=ValueType.NUMBER, # Misclassified + ) + # Should return raw value when conversion fails + assert binding.typed_value == "not-a-number" + + def test_typed_value_int(self): + """Test typed_value with int.""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value=42, + value_type=ValueType.NUMBER, + ) + assert binding.typed_value == 42.0 + + def test_typed_value_non_number(self): + """Test typed_value with non-number type.""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value=["a", "b"], + value_type=ValueType.LIST, + ) + assert binding.typed_value == ["a", "b"] + + +class TestValueBindingFormatForModel: + """Tests for ValueBinding.format_for_model edge cases.""" + + def test_format_scientific_large(self): + """Test format_for_model with large number (scientific notation).""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value=1.5e10, + value_type=ValueType.NUMBER, + ) + formatted = binding.format_for_model() + assert "e" in formatted.lower() or "E" in formatted + + def test_format_scientific_small(self): + """Test format_for_model with small number (scientific notation).""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value=1.5e-10, + value_type=ValueType.NUMBER, + ) + formatted = binding.format_for_model() + assert "e" in formatted.lower() or "E" in formatted + + def test_format_string_long(self): + """Test format_for_model with long string (truncated).""" + binding = ValueBinding( + id="v1", + tool_name="test", + 
args_hash="abc123", + raw_value="x" * 100, + value_type=ValueType.STRING, + ) + formatted = binding.format_for_model() + assert "..." in formatted + assert len(formatted) < 150 + + def test_format_string_short(self): + """Test format_for_model with short string.""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value="hello", + value_type=ValueType.STRING, + ) + formatted = binding.format_for_model() + assert '"hello"' in formatted + + def test_format_empty_list(self): + """Test format_for_model with empty list.""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value=[], + value_type=ValueType.LIST, + ) + formatted = binding.format_for_model() + assert "[]" in formatted + + def test_format_small_list(self): + """Test format_for_model with small list.""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value=[1, 2, 3], + value_type=ValueType.LIST, + ) + formatted = binding.format_for_model() + assert "[" in formatted + + def test_format_large_list(self): + """Test format_for_model with large list.""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value=list(range(100)), + value_type=ValueType.LIST, + ) + formatted = binding.format_for_model() + assert "100 items" in formatted + + def test_format_list_non_list_value(self): + """Test format_for_model with LIST type but non-list value.""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value="not-a-list", + value_type=ValueType.LIST, + ) + formatted = binding.format_for_model() + assert "not-a-list" in formatted + + def test_format_empty_object(self): + """Test format_for_model with empty object.""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value={}, + value_type=ValueType.OBJECT, + ) + formatted = binding.format_for_model() + assert "{}" in formatted + + def test_format_small_object(self): + 
"""Test format_for_model with small object.""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value={"a": 1, "b": 2}, + value_type=ValueType.OBJECT, + ) + formatted = binding.format_for_model() + assert "keys" in formatted + + def test_format_large_object(self): + """Test format_for_model with large object.""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value={f"key{i}": i for i in range(20)}, + value_type=ValueType.OBJECT, + ) + formatted = binding.format_for_model() + assert "20 keys" in formatted + + def test_format_object_non_dict_value(self): + """Test format_for_model with OBJECT type but non-dict value.""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value="not-a-dict", + value_type=ValueType.OBJECT, + ) + formatted = binding.format_for_model() + assert "not-a-dict" in formatted + + def test_format_unknown_type(self): + """Test format_for_model with UNKNOWN type.""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value=None, + value_type=ValueType.UNKNOWN, + ) + formatted = binding.format_for_model() + assert "None" in formatted + + def test_format_with_aliases(self): + """Test format_for_model includes aliases.""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value=42, + value_type=ValueType.NUMBER, + aliases=["sigma", "std_dev"], + ) + formatted = binding.format_for_model() + assert "sigma" in formatted + assert "std_dev" in formatted + + def test_format_typed_value_non_float(self): + """Test format_for_model when typed_value is not float.""" + binding = ValueBinding( + id="v1", + tool_name="test", + args_hash="abc123", + raw_value="invalid", + value_type=ValueType.NUMBER, + ) + formatted = binding.format_for_model() + # Should use str() for non-float typed value + assert "invalid" in formatted + + +class TestRunawayStatusMessage: + """Tests for 
RunawayStatus.message edge cases.""" + + def test_message_unknown_reason(self): + """Test message with unknown stop reason.""" + status = RunawayStatus(should_stop=True) + assert status.message == "Unknown stop reason" + + def test_message_custom_reason(self): + """Test message with custom reason.""" + status = RunawayStatus(should_stop=True, reason="Custom stop reason") + assert status.message == "Custom stop reason" + + +class TestCachedToolResultFormatArgs: + """Tests for CachedToolResult._format_args edge cases.""" + + def test_format_args_empty(self): + """Test _format_args with no arguments.""" + result = CachedToolResult( + tool_name="test", + arguments={}, + result="output", + ) + formatted = result.format_compact() + assert "test()" in formatted + + def test_format_args_single_numeric(self): + """Test _format_args with single numeric arg.""" + result = CachedToolResult( + tool_name="sqrt", + arguments={"x": 18}, + result=4.2426, + ) + formatted = result.format_compact() + assert "18" in formatted + + def test_format_args_multiple(self): + """Test _format_args with multiple args.""" + result = CachedToolResult( + tool_name="add", + arguments={"a": 1, "b": 2}, + result=3, + ) + formatted = result.format_compact() + assert "a=" in formatted + assert "b=" in formatted + + def test_format_args_string_short(self): + """Test _format_args with short string arg.""" + result = CachedToolResult( + tool_name="echo", + arguments={"msg": "hello"}, + result="hello", + ) + formatted = result.format_compact() + assert '"hello"' in formatted + + def test_format_args_string_long(self): + """Test _format_args with long string arg (truncated).""" + result = CachedToolResult( + tool_name="echo", + arguments={"msg": "x" * 100}, + result="long", + ) + formatted = result.format_compact() + assert "..." 
in formatted + + def test_format_compact_long_result(self): + """Test format_compact with long non-numeric result.""" + result = CachedToolResult( + tool_name="echo", + arguments={"msg": "hi"}, + result="x" * 100, + ) + formatted = result.format_compact() + assert "..." in formatted + + def test_format_compact_numeric_int(self): + """Test format_compact with integer result.""" + result = CachedToolResult( + tool_name="add", + arguments={"a": 1, "b": 2}, + result=3, + ) + formatted = result.format_compact() + assert "3" in formatted + + def test_numeric_value_with_none_result(self): + """Test numeric_value property when result is not numeric.""" + result = CachedToolResult( + tool_name="echo", + arguments={"msg": "hi"}, + result={"key": "value"}, # Not numeric + ) + assert result.numeric_value is None + + +class TestSoftBlockNextRepairAction: + """Tests for SoftBlock.next_repair_action edge cases.""" + + def test_next_repair_action_missing_dependency(self): + """Test next_repair_action with MISSING_DEPENDENCY reason.""" + block = SoftBlock(reason=SoftBlockReason.MISSING_DEPENDENCY) + assert block.next_repair_action == RepairAction.COMPUTE_MISSING + + def test_next_repair_action_budget_exhausted(self): + """Test next_repair_action with BUDGET_EXHAUSTED reason.""" + block = SoftBlock(reason=SoftBlockReason.BUDGET_EXHAUSTED) + assert block.next_repair_action == RepairAction.ASK_USER + + def test_next_repair_action_per_tool_limit(self): + """Test next_repair_action with PER_TOOL_LIMIT reason.""" + block = SoftBlock(reason=SoftBlockReason.PER_TOOL_LIMIT) + assert block.next_repair_action == RepairAction.ASK_USER + + +class TestToolClassificationMethods: + """Tests for ToolClassification class methods.""" + + def test_is_discovery_tool_namespaced(self): + """Test is_discovery_tool with namespaced tool.""" + from chuk_ai_session_manager.guards.models import ToolClassification + + assert ToolClassification.is_discovery_tool("namespace.search_tools") is True + assert 
ToolClassification.is_discovery_tool("namespace.sqrt") is False + + def test_is_idempotent_math_tool_namespaced(self): + """Test is_idempotent_math_tool with namespaced tool.""" + from chuk_ai_session_manager.guards.models import ToolClassification + + assert ToolClassification.is_idempotent_math_tool("math.sqrt") is True + assert ToolClassification.is_idempotent_math_tool("stats.normal_cdf") is False + + def test_is_parameterized_tool_namespaced(self): + """Test is_parameterized_tool with namespaced tool.""" + from chuk_ai_session_manager.guards.models import ToolClassification + + assert ToolClassification.is_parameterized_tool("stats.normal_cdf") is True + assert ToolClassification.is_parameterized_tool("math.sqrt") is False diff --git a/tests/chat/test_chat_context.py b/tests/chat/test_chat_context.py index fe99da9e..22ad9378 100644 --- a/tests/chat/test_chat_context.py +++ b/tests/chat/test_chat_context.py @@ -123,6 +123,8 @@ def chat_context(dummy_tool_manager, monkeypatch): mock_model_manager.provider = "mock" mock_model_manager.model = "mock-model" mock_model_manager.get_client.return_value = None + mock_model_manager.get_active_provider.return_value = "mock" + mock_model_manager.get_active_model.return_value = "mock-model" ctx = ChatContext.create( tool_manager=dummy_tool_manager, model_manager=mock_model_manager @@ -164,16 +166,580 @@ async def test_get_server_for_tool(chat_context): @pytest.mark.asyncio async def test_to_dict_and_update_roundtrip(chat_context): await chat_context.initialize() - original_len = chat_context.get_conversation_length() exported = chat_context.to_dict() - # mutate exported copy + # update_from_dict handles exit_requested but not conversation_history exported["exit_requested"] = True - exported["conversation_history"].append({"role": "user", "content": "Hi"}) chat_context.update_from_dict(exported) assert chat_context.exit_requested is True - assert chat_context.get_conversation_length() == original_len + 1 - assert 
chat_context.conversation_history[-1].content == "Hi" + + +# --------------------------------------------------------------------------- +# Additional coverage tests +# --------------------------------------------------------------------------- + + +@pytest.mark.asyncio +async def test_find_tool_by_name_exact(chat_context): + """Test find_tool_by_name with exact match.""" + await chat_context.initialize() + tool = chat_context.find_tool_by_name("tool1") + assert tool is not None + assert tool.name == "tool1" + + +@pytest.mark.asyncio +async def test_find_tool_by_name_fully_qualified(chat_context): + """Test find_tool_by_name with fully qualified name.""" + await chat_context.initialize() + # Try using srv1.tool1 format + tool = chat_context.find_tool_by_name("srv1.tool1") + assert tool is not None + + +@pytest.mark.asyncio +async def test_find_tool_by_name_not_found(chat_context): + """Test find_tool_by_name with non-existent tool.""" + await chat_context.initialize() + tool = chat_context.find_tool_by_name("nonexistent") + assert tool is None + + +@pytest.mark.asyncio +async def test_find_server_by_name(chat_context): + """Test find_server_by_name.""" + await chat_context.initialize() + server = chat_context.find_server_by_name("srv1") + assert server is not None + assert server.name == "srv1" + + +@pytest.mark.asyncio +async def test_find_server_by_name_not_found(chat_context): + """Test find_server_by_name with non-existent server.""" + await chat_context.initialize() + server = chat_context.find_server_by_name("nonexistent") + assert server is None + + +@pytest.mark.asyncio +async def test_add_user_message(chat_context): + """Test add_user_message.""" + await chat_context.initialize() + initial_len = len(chat_context.conversation_history) + await chat_context.add_user_message("Hello!") + assert len(chat_context.conversation_history) == initial_len + 1 + assert chat_context.conversation_history[-1].content == "Hello!" 
+ assert chat_context.conversation_history[-1].role.value == "user" + + +@pytest.mark.asyncio +async def test_add_assistant_message(chat_context): + """Test add_assistant_message.""" + await chat_context.initialize() + initial_len = len(chat_context.conversation_history) + await chat_context.add_assistant_message("Hi there!") + assert len(chat_context.conversation_history) == initial_len + 1 + assert chat_context.conversation_history[-1].content == "Hi there!" + assert chat_context.conversation_history[-1].role.value == "assistant" + + +@pytest.mark.asyncio +async def test_clear_conversation_history_keep_system(chat_context): + """Test clear_conversation_history with keep_system_prompt=True.""" + await chat_context.initialize() + await chat_context.add_user_message("Hello") + await chat_context.add_assistant_message("Hi") + + await chat_context.clear_conversation_history(keep_system_prompt=True) + + assert len(chat_context.conversation_history) == 1 + assert chat_context.conversation_history[0].role.value == "system" + + +@pytest.mark.asyncio +async def test_clear_conversation_history_remove_all(chat_context): + """Test clear_conversation_history creates fresh session (system prompt always kept).""" + await chat_context.initialize() + await chat_context.add_user_message("Hello") + + await chat_context.clear_conversation_history(keep_system_prompt=False) + + # Implementation always creates fresh session with system prompt + assert len(chat_context.conversation_history) == 1 + assert chat_context.conversation_history[0].role.value == "system" + + +@pytest.mark.asyncio +async def test_regenerate_system_prompt(chat_context): + """Test regenerate_system_prompt.""" + await chat_context.initialize() + _ = chat_context.conversation_history[0].content # Original prompt + + # Regenerate should update the system prompt + await chat_context.regenerate_system_prompt() + + # Should still be the first message + assert chat_context.conversation_history[0].role.value == "system" + 
+ +@pytest.mark.asyncio +async def test_get_tool_count(chat_context): + """Test get_tool_count.""" + await chat_context.initialize() + assert chat_context.get_tool_count() == 2 + + +@pytest.mark.asyncio +async def test_get_server_count(chat_context): + """Test get_server_count.""" + await chat_context.initialize() + assert chat_context.get_server_count() == 2 + + +def test_get_display_name_for_tool(): + """Test get_display_name_for_tool static method.""" + from mcp_cli.chat.chat_context import ChatContext + + name = ChatContext.get_display_name_for_tool("srv1.tool1") + assert name == "srv1.tool1" + + +@pytest.mark.asyncio +async def test_get_status_summary(chat_context): + """Test get_status_summary.""" + await chat_context.initialize() + status = chat_context.get_status_summary() + assert status.tool_count == 2 + assert status.server_count == 2 + + +@pytest.mark.asyncio +async def test_repr(chat_context): + """Test __repr__.""" + await chat_context.initialize() + repr_str = repr(chat_context) + assert "ChatContext" in repr_str + assert "tools=2" in repr_str + + +@pytest.mark.asyncio +async def test_str(chat_context): + """Test __str__.""" + await chat_context.initialize() + str_val = str(chat_context) + assert "Chat session" in str_val + assert "2 tools" in str_val + + +@pytest.mark.asyncio +async def test_context_manager(dummy_tool_manager, monkeypatch): + """Test async context manager.""" + monkeypatch.setattr( + "mcp_cli.chat.chat_context.generate_system_prompt", lambda tools: "SYS_PROMPT" + ) + + from unittest.mock import Mock + from mcp_cli.model_management import ModelManager + + mock_model_manager = Mock(spec=ModelManager) + mock_model_manager.provider = "mock" + mock_model_manager.model = "mock-model" + mock_model_manager.get_client.return_value = None + mock_model_manager.get_active_provider.return_value = "mock" + mock_model_manager.get_active_model.return_value = "mock-model" + + from mcp_cli.chat.chat_context import ChatContext + + async with 
ChatContext.create( + tool_manager=dummy_tool_manager, model_manager=mock_model_manager + ) as ctx: + assert ctx.get_tool_count() == 2 + + +@pytest.mark.asyncio +async def test_update_from_dict_with_exit_requested(chat_context): + """Test update_from_dict updates exit_requested.""" + await chat_context.initialize() + + chat_context.update_from_dict({"exit_requested": True}) + assert chat_context.exit_requested is True + + chat_context.update_from_dict({"exit_requested": False}) + assert chat_context.exit_requested is False + + +@pytest.mark.asyncio +async def test_execute_tool(chat_context): + """Test execute_tool delegation.""" + await chat_context.initialize() + result = await chat_context.execute_tool("tool1", {"arg": "value"}) + assert result["success"] is True + + +@pytest.mark.asyncio +async def test_stream_execute_tool(chat_context): + """Test stream_execute_tool delegation.""" + await chat_context.initialize() + results = [] + async for result in chat_context.stream_execute_tool("tool1", {"arg": "value"}): + results.append(result) + assert len(results) >= 1 + + +@pytest.mark.asyncio +async def test_refresh_after_model_change(chat_context): + """Test refresh_after_model_change.""" + await chat_context.initialize() + # Should not raise + await chat_context.refresh_after_model_change() + assert chat_context.get_tool_count() == 2 + + +@pytest.mark.asyncio +async def test_create_with_provider_only(dummy_tool_manager, monkeypatch): + """Test ChatContext.create with provider only.""" + monkeypatch.setattr( + "mcp_cli.chat.chat_context.generate_system_prompt", lambda tools: "SYS_PROMPT" + ) + + from unittest.mock import Mock + from mcp_cli.model_management import ModelManager + + # Create new ModelManager instance for this test + mock_manager = Mock(spec=ModelManager) + mock_manager.get_client.return_value = None + mock_manager.get_active_provider.return_value = "openai" + mock_manager.get_active_model.return_value = "gpt-4" + 
mock_manager.switch_provider.return_value = None + + # Patch ModelManager constructor + with monkeypatch.context() as m: + m.setattr("mcp_cli.chat.chat_context.ModelManager", lambda: mock_manager) + ctx = ChatContext.create( + tool_manager=dummy_tool_manager, + provider="openai", + ) + assert ctx is not None + + +@pytest.mark.asyncio +async def test_create_with_model_only(dummy_tool_manager, monkeypatch): + """Test ChatContext.create with model only.""" + monkeypatch.setattr( + "mcp_cli.chat.chat_context.generate_system_prompt", lambda tools: "SYS_PROMPT" + ) + + from unittest.mock import Mock + from mcp_cli.model_management import ModelManager + + mock_manager = Mock(spec=ModelManager) + mock_manager.get_client.return_value = None + mock_manager.get_active_provider.return_value = "openai" + mock_manager.get_active_model.return_value = "gpt-4" + mock_manager.switch_model.return_value = None + + with monkeypatch.context() as m: + m.setattr("mcp_cli.chat.chat_context.ModelManager", lambda: mock_manager) + ctx = ChatContext.create( + tool_manager=dummy_tool_manager, + model="gpt-4", + ) + assert ctx is not None + + +@pytest.mark.asyncio +async def test_create_with_provider_and_api_settings(dummy_tool_manager, monkeypatch): + """Test ChatContext.create with provider and API settings.""" + monkeypatch.setattr( + "mcp_cli.chat.chat_context.generate_system_prompt", lambda tools: "SYS_PROMPT" + ) + + from unittest.mock import Mock + from mcp_cli.model_management import ModelManager + + mock_manager = Mock(spec=ModelManager) + mock_manager.get_client.return_value = None + mock_manager.get_active_provider.return_value = "custom" + mock_manager.get_active_model.return_value = "custom-model" + mock_manager.add_runtime_provider.return_value = None + mock_manager.switch_provider.return_value = None + + with monkeypatch.context() as m: + m.setattr("mcp_cli.chat.chat_context.ModelManager", lambda: mock_manager) + ctx = ChatContext.create( + tool_manager=dummy_tool_manager, + 
provider="custom", + api_base="http://localhost:8080", + api_key="test-key", + ) + assert ctx is not None + + +@pytest.mark.asyncio +async def test_create_with_provider_model_and_api_settings( + dummy_tool_manager, monkeypatch +): + """Test ChatContext.create with all settings.""" + monkeypatch.setattr( + "mcp_cli.chat.chat_context.generate_system_prompt", lambda tools: "SYS_PROMPT" + ) + + from unittest.mock import Mock + from mcp_cli.model_management import ModelManager + + mock_manager = Mock(spec=ModelManager) + mock_manager.get_client.return_value = None + mock_manager.get_active_provider.return_value = "custom" + mock_manager.get_active_model.return_value = "custom-model" + mock_manager.add_runtime_provider.return_value = None + mock_manager.switch_model.return_value = None + + with monkeypatch.context() as m: + m.setattr("mcp_cli.chat.chat_context.ModelManager", lambda: mock_manager) + ctx = ChatContext.create( + tool_manager=dummy_tool_manager, + provider="custom", + model="custom-model", + api_key="test-key", + ) + assert ctx is not None + + +@pytest.mark.asyncio +async def test_initialize_failure(dummy_tool_manager, monkeypatch): + """Test initialize handles errors.""" + from unittest.mock import Mock + from mcp_cli.model_management import ModelManager + + # Patch generate_system_prompt to avoid issues + monkeypatch.setattr( + "mcp_cli.chat.chat_context.generate_system_prompt", lambda tools: "SYS_PROMPT" + ) + + mock_manager = Mock(spec=ModelManager) + mock_manager.get_client.return_value = None + mock_manager.get_active_provider.return_value = "mock" + mock_manager.get_active_model.return_value = "mock-model" + + # Create context + ctx = ChatContext.create( + tool_manager=dummy_tool_manager, model_manager=mock_manager + ) + + # Make _initialize_tools raise an exception + async def raise_error(): + raise RuntimeError("Test error") + + ctx._initialize_tools = raise_error + + result = await ctx.initialize() + assert result is False + + +@pytest.mark.asyncio 
+async def test_regenerate_system_prompt_insert(dummy_tool_manager, monkeypatch): + """Test regenerate_system_prompt when no system message exists.""" + monkeypatch.setattr( + "mcp_cli.chat.chat_context.generate_system_prompt", lambda tools: "SYS_PROMPT" + ) + + from unittest.mock import Mock + from mcp_cli.model_management import ModelManager + + mock_manager = Mock(spec=ModelManager) + mock_manager.get_client.return_value = None + mock_manager.get_active_provider.return_value = "mock" + mock_manager.get_active_model.return_value = "mock-model" + + ctx = ChatContext.create( + tool_manager=dummy_tool_manager, model_manager=mock_manager + ) + await ctx.initialize() + + # Clear conversation history completely + await ctx.clear_conversation_history(keep_system_prompt=False) + + # Regenerate should insert at position 0 + await ctx.regenerate_system_prompt() + + assert len(ctx.conversation_history) == 1 + assert ctx.conversation_history[0].role.value == "system" + + +@pytest.mark.asyncio +async def test_context_manager_failure(dummy_tool_manager, monkeypatch): + """Test async context manager handles initialization failure.""" + monkeypatch.setattr( + "mcp_cli.chat.chat_context.generate_system_prompt", lambda tools: "SYS_PROMPT" + ) + + from unittest.mock import Mock + from mcp_cli.model_management import ModelManager + + mock_manager = Mock(spec=ModelManager) + mock_manager.get_client.return_value = None + mock_manager.get_active_provider.return_value = "mock" + mock_manager.get_active_model.return_value = "mock-model" + + ctx = ChatContext.create( + tool_manager=dummy_tool_manager, model_manager=mock_manager + ) + + # Make initialize return False + async def fail_init(): + return False + + ctx.initialize = fail_init + + with pytest.raises(RuntimeError, match="Failed to initialize"): + async with ctx: + pass + + +@pytest.mark.asyncio +async def test_adapt_tools_without_get_adapted_tools(dummy_tool_manager, monkeypatch): + """Test _adapt_tools_for_provider fallback when 
get_adapted_tools_for_llm not available.""" + monkeypatch.setattr( + "mcp_cli.chat.chat_context.generate_system_prompt", lambda tools: "SYS_PROMPT" + ) + + from unittest.mock import Mock + from mcp_cli.model_management import ModelManager + + # Create tool manager without get_adapted_tools_for_llm + class MinimalToolManager: + def __init__(self): + self._tools = [ + ToolInfo( + name="tool1", + namespace="srv1", + description="demo-1", + parameters={}, + is_async=False, + ), + ] + + async def get_unique_tools(self): + return self._tools + + async def get_server_info(self): + return [] + + async def get_tools_for_llm(self): + return [{"type": "function", "function": {"name": "tool1"}}] + + mock_manager = Mock(spec=ModelManager) + mock_manager.get_client.return_value = None + mock_manager.get_active_provider.return_value = "mock" + mock_manager.get_active_model.return_value = "mock-model" + + ctx = ChatContext.create( + tool_manager=MinimalToolManager(), model_manager=mock_manager + ) + await ctx.initialize() + + assert len(ctx.openai_tools) == 1 + assert ctx.tool_name_mapping == {} + + +@pytest.mark.asyncio +async def test_adapt_tools_exception_fallback(dummy_tool_manager, monkeypatch): + """Test _adapt_tools_for_provider handles exceptions.""" + monkeypatch.setattr( + "mcp_cli.chat.chat_context.generate_system_prompt", lambda tools: "SYS_PROMPT" + ) + + from unittest.mock import Mock + from mcp_cli.model_management import ModelManager + + # Create tool manager that raises in get_adapted_tools_for_llm + class FailingToolManager: + def __init__(self): + self._tools = [ + ToolInfo( + name="tool1", + namespace="srv1", + description="demo-1", + parameters={}, + is_async=False, + ), + ] + + async def get_unique_tools(self): + return self._tools + + async def get_server_info(self): + return [] + + async def get_adapted_tools_for_llm(self, provider): + raise RuntimeError("Adaptation failed") + + async def get_tools_for_llm(self): + return [{"type": "function", "function": 
{"name": "tool1"}}] + + mock_manager = Mock(spec=ModelManager) + mock_manager.get_client.return_value = None + mock_manager.get_active_provider.return_value = "mock" + mock_manager.get_active_model.return_value = "mock-model" + + ctx = ChatContext.create( + tool_manager=FailingToolManager(), model_manager=mock_manager + ) + await ctx.initialize() + + # Should fall back to get_tools_for_llm + assert len(ctx.openai_tools) == 1 + + +@pytest.mark.asyncio +async def test_initialize_no_tools_warning(monkeypatch, capsys): + """Test initialize prints warning when no tools available.""" + monkeypatch.setattr( + "mcp_cli.chat.chat_context.generate_system_prompt", lambda tools: "SYS_PROMPT" + ) + + from unittest.mock import Mock + from mcp_cli.model_management import ModelManager + + # Create tool manager that returns no tools + class EmptyToolManager: + async def get_unique_tools(self): + return [] + + async def get_server_info(self): + return [] + + async def get_adapted_tools_for_llm(self, provider): + return [], {} + + async def get_tools_for_llm(self): + return [] + + mock_manager = Mock(spec=ModelManager) + mock_manager.get_client.return_value = None + mock_manager.get_active_provider.return_value = "mock" + mock_manager.get_active_model.return_value = "mock-model" + + ctx = ChatContext.create( + tool_manager=EmptyToolManager(), model_manager=mock_manager + ) + result = await ctx.initialize() + + assert result is True + assert ctx.get_tool_count() == 0 + + +@pytest.mark.asyncio +async def test_find_tool_by_name_partial_match(chat_context): + """Test find_tool_by_name with partial match (just tool name without namespace).""" + await chat_context.initialize() + # The dummy tools have namespace like "srv1" and name like "tool1" + # Try to find by using a dotted name that doesn't match exactly + # but the simple name part matches + tool = chat_context.find_tool_by_name("other.tool1") + assert tool is not None + assert tool.name == "tool1" diff --git 
a/tests/chat/test_conversation.py b/tests/chat/test_conversation.py new file mode 100644 index 00000000..5417128e --- /dev/null +++ b/tests/chat/test_conversation.py @@ -0,0 +1,1546 @@ +# tests/chat/test_conversation.py +"""Tests for ConversationProcessor.""" + +import pytest +from unittest.mock import AsyncMock, MagicMock, patch + +from mcp_cli.chat.conversation import ConversationProcessor +from mcp_cli.chat.response_models import ( + CompletionResponse, + Message, + MessageRole, + ToolCall, + FunctionCall, +) + + +class MockUIManager: + """Mock UI manager for testing.""" + + def __init__(self): + self.is_streaming_response = False + self.streaming_handler = None + self.display = MagicMock() + + async def start_streaming_response(self): + self.is_streaming_response = True + + async def stop_streaming_response(self): + self.is_streaming_response = False + + async def print_assistant_message(self, content, elapsed): + pass + + +class MockContext: + """Mock context for testing.""" + + def __init__(self): + self.conversation_history = [] + self.openai_tools = [] + self.tool_name_mapping = {} + self.client = MagicMock() + self.tool_manager = MagicMock() + # Make tool_manager async methods work properly + self.tool_manager.get_adapted_tools_for_llm = AsyncMock(return_value=([], {})) + self.provider = "openai" + + async def add_assistant_message(self, content): + """Add assistant message to conversation history.""" + self.conversation_history.append( + Message(role=MessageRole.ASSISTANT, content=content) + ) + + def inject_assistant_message(self, message): + """Inject a message into conversation history.""" + self.conversation_history.append(message) + + def inject_tool_message(self, message): + """Inject a tool message into conversation history.""" + self.conversation_history.append(message) + + +class TestConversationProcessorInit: + """Tests for ConversationProcessor initialization.""" + + def test_init(self): + """Test basic initialization.""" + context = 
MockContext() + ui_manager = MockUIManager() + + processor = ConversationProcessor(context, ui_manager) + + assert processor.context is context + assert processor.ui_manager is ui_manager + assert processor.tool_processor is not None + assert processor._consecutive_duplicate_count == 0 + assert processor._max_consecutive_duplicates == 5 + + def test_init_with_runtime_config(self): + """Test initialization with runtime config.""" + context = MockContext() + ui_manager = MockUIManager() + runtime_config = {"some": "config"} + + processor = ConversationProcessor(context, ui_manager, runtime_config) + + assert processor.runtime_config == runtime_config + + +class TestRegisterUserLiterals: + """Tests for _register_user_literals_from_history.""" + + def test_register_from_user_message(self): + """Test registering literals from user message.""" + context = MockContext() + context.conversation_history = [ + Message(role=MessageRole.USER, content="Calculate sqrt of 18") + ] + ui_manager = MockUIManager() + + processor = ConversationProcessor(context, ui_manager) + count = processor._register_user_literals_from_history() + + # Should register at least the number 18 + assert count >= 1 + + def test_register_empty_history(self): + """Test with empty history.""" + context = MockContext() + context.conversation_history = [] + ui_manager = MockUIManager() + + processor = ConversationProcessor(context, ui_manager) + count = processor._register_user_literals_from_history() + + assert count == 0 + + def test_register_only_most_recent(self): + """Test that only most recent user message is processed.""" + context = MockContext() + context.conversation_history = [ + Message(role=MessageRole.USER, content="First message with 10"), + Message(role=MessageRole.ASSISTANT, content="Response"), + Message(role=MessageRole.USER, content="Second with 20 and 30"), + ] + ui_manager = MockUIManager() + + processor = ConversationProcessor(context, ui_manager) + # Reset tool state to clear any prior 
registrations + processor._tool_state.reset_for_new_prompt() + count = processor._register_user_literals_from_history() + + # Should only process the most recent user message + assert count >= 2 # At least 20 and 30 + + +class TestProcessConversation: + """Tests for process_conversation method.""" + + @pytest.mark.asyncio + async def test_slash_command_skipped(self): + """Test that slash commands are skipped.""" + context = MockContext() + context.conversation_history = [Message(role=MessageRole.USER, content="/help")] + ui_manager = MockUIManager() + + processor = ConversationProcessor(context, ui_manager) + await processor.process_conversation() + + # Should return immediately without processing + assert len(context.conversation_history) == 1 + + @pytest.mark.asyncio + async def test_no_tools_loads_tools(self): + """Test that tools are loaded if not present.""" + context = MockContext() + context.conversation_history = [Message(role=MessageRole.USER, content="Hello")] + context.openai_tools = None # No tools loaded + context.client.create_completion = AsyncMock( + return_value={"response": "Hi there!", "tool_calls": None} + ) + ui_manager = MockUIManager() + + # Mock _load_tools + processor = ConversationProcessor(context, ui_manager) + processor._load_tools = AsyncMock() + + await processor.process_conversation() + + # Should have called _load_tools + processor._load_tools.assert_called_once() + + @pytest.mark.asyncio + async def test_simple_response(self): + """Test processing a simple text response.""" + context = MockContext() + context.conversation_history = [Message(role=MessageRole.USER, content="Hello")] + context.openai_tools = [] + + # Mock client response + context.client.create_completion = AsyncMock( + return_value={"response": "Hello! 
How can I help?", "tool_calls": None} + ) + + ui_manager = MockUIManager() + ui_manager.print_assistant_message = AsyncMock() + + processor = ConversationProcessor(context, ui_manager) + await processor.process_conversation() + + # Should have added assistant message to history + assert len(context.conversation_history) == 2 + assert context.conversation_history[-1].role == MessageRole.ASSISTANT + assert "Hello" in context.conversation_history[-1].content + + @pytest.mark.asyncio + async def test_max_turns_limit(self): + """Test that max_turns limit is enforced.""" + context = MockContext() + context.conversation_history = [Message(role=MessageRole.USER, content="Hello")] + context.openai_tools = [] + + # Mock client to always return tool calls (would loop forever) + tool_call = ToolCall( + id="call_1", + type="function", + function=FunctionCall(name="sqrt", arguments='{"x": 18}'), + ) + context.client.create_completion = AsyncMock( + return_value={"response": "", "tool_calls": [tool_call.model_dump()]} + ) + + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + # Mock tool processor to avoid actual execution + processor.tool_processor.process_tool_calls = AsyncMock() + + # Set very low max_turns + await processor.process_conversation(max_turns=2) + + # Should have stopped due to max_turns + assert processor.tool_processor.process_tool_calls.call_count <= 2 + + +class TestHandleRegularCompletion: + """Tests for _handle_regular_completion.""" + + @pytest.mark.asyncio + async def test_regular_completion_success(self): + """Test successful regular completion.""" + context = MockContext() + context.conversation_history = [Message(role=MessageRole.USER, content="Hello")] + context.client.create_completion = AsyncMock( + return_value={"response": "Hi!", "tool_calls": None} + ) + + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + result = await processor._handle_regular_completion(tools=[]) + + 
assert isinstance(result, CompletionResponse) + assert result.response == "Hi!" + assert result.streaming is False + assert result.elapsed_time > 0 + + @pytest.mark.asyncio + async def test_regular_completion_tool_error_retry(self): + """Test retry without tools on tool definition error.""" + context = MockContext() + context.conversation_history = [Message(role=MessageRole.USER, content="Hello")] + + # First call fails with tool error, second succeeds + context.client.create_completion = AsyncMock( + side_effect=[ + Exception("Invalid 'tools' specification"), + {"response": "Hi without tools!", "tool_calls": None}, + ] + ) + + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + result = await processor._handle_regular_completion(tools=[{"some": "tool"}]) + + assert isinstance(result, CompletionResponse) + assert result.response == "Hi without tools!" + # Should have been called twice + assert context.client.create_completion.call_count == 2 + + @pytest.mark.asyncio + async def test_regular_completion_other_error_raises(self): + """Test that non-tool errors are raised.""" + context = MockContext() + context.conversation_history = [Message(role=MessageRole.USER, content="Hello")] + context.client.create_completion = AsyncMock( + side_effect=Exception("Some other error") + ) + + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + with pytest.raises(Exception, match="Some other error"): + await processor._handle_regular_completion(tools=[]) + + +class TestLoadTools: + """Tests for _load_tools.""" + + @pytest.mark.asyncio + async def test_load_tools_success(self): + """Test successful tool loading.""" + context = MockContext() + context.tool_manager.get_adapted_tools_for_llm = AsyncMock( + return_value=( + [{"type": "function", "function": {"name": "sqrt"}}], + {"sqrt": "math.sqrt"}, + ) + ) + + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + await 
processor._load_tools() + + assert len(context.openai_tools) == 1 + assert context.tool_name_mapping == {"sqrt": "math.sqrt"} + + @pytest.mark.asyncio + async def test_load_tools_error(self): + """Test tool loading error handling.""" + context = MockContext() + context.tool_manager.get_adapted_tools_for_llm = AsyncMock( + side_effect=Exception("Failed to load") + ) + + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + await processor._load_tools() + + # Should set empty tools on error + assert context.openai_tools == [] + assert context.tool_name_mapping == {} + + +class TestHandleStreamingCompletion: + """Tests for _handle_streaming_completion via process_conversation integration.""" + + @pytest.mark.asyncio + async def test_streaming_path_taken_when_supported(self): + """Test that streaming path is used when client supports it.""" + context = MockContext() + context.conversation_history = [Message(role=MessageRole.USER, content="Hello")] + context.openai_tools = [] + + ui_manager = MockUIManager() + ui_manager.start_streaming_response = AsyncMock() + ui_manager.display = MagicMock() + ui_manager.is_streaming_response = False + ui_manager.print_assistant_message = AsyncMock() + + processor = ConversationProcessor(context, ui_manager) + + # Create mock tool state + mock_tool_state = MagicMock() + mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.extract_bindings_from_text = MagicMock(return_value=[]) + mock_tool_state.format_unused_warning = MagicMock(return_value=None) + processor._tool_state = mock_tool_state + + # Mock streaming completion method directly + async def mock_streaming(tools=None): + return CompletionResponse( + response="Streamed!", + tool_calls=[], + streaming=True, + elapsed_time=0.5, + ) + + processor._handle_streaming_completion = mock_streaming + + await processor.process_conversation(max_turns=1) + + # The streaming 
handler should have been set to None at the end + assert ui_manager.streaming_handler is None + + @pytest.mark.asyncio + async def test_tool_calls_are_processed_in_conversation(self): + """Test that tool calls from completion are processed.""" + context = MockContext() + context.conversation_history = [ + Message(role=MessageRole.USER, content="Calculate") + ] + context.openai_tools = [{"type": "function", "function": {"name": "sqrt"}}] + context.tool_name_mapping = {} + + # Create client mock that returns tool calls first, then a response + call_count = [0] + + async def mock_completion(**kwargs): + call_count[0] += 1 + if call_count[0] == 1: + return { + "response": "", + "tool_calls": [ + { + "id": "call_1", + "type": "function", + "function": {"name": "sqrt", "arguments": '{"x": 16}'}, + } + ], + } + else: + return {"response": "The result is 4", "tool_calls": []} + + context.client.create_completion = mock_completion + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + ui_manager.stop_streaming_response = AsyncMock() + ui_manager.print_assistant_message = AsyncMock() + + processor = ConversationProcessor(context, ui_manager) + + # Create mock tool state + mock_tool_state = MagicMock() + mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.is_discovery_tool = MagicMock(return_value=False) + mock_tool_state.is_execution_tool = MagicMock(return_value=False) + mock_tool_state.extract_bindings_from_text = MagicMock(return_value=[]) + mock_tool_state.format_unused_warning = MagicMock(return_value=None) + from chuk_ai_session_manager.guards import RunawayStatus + + mock_tool_state.check_runaway = MagicMock( + return_value=RunawayStatus(should_stop=False) + ) + processor._tool_state = mock_tool_state + + # Mock tool processor + processor.tool_processor.process_tool_calls = AsyncMock() + + await processor.process_conversation(max_turns=3) + + # Should have 
processed tool calls + processor.tool_processor.process_tool_calls.assert_called() + + +class TestDuplicateToolCallDetection: + """Tests for duplicate tool call detection.""" + + @pytest.mark.asyncio + async def test_consecutive_duplicate_limit(self): + """Test that max consecutive duplicates triggers exit.""" + context = MockContext() + context.conversation_history = [Message(role=MessageRole.USER, content="Hello")] + context.openai_tools = [] + + # Create a tool call response + tool_call = ToolCall( + id="call_1", + type="function", + function=FunctionCall(name="sqrt", arguments='{"x": 18}'), + ) + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + processor = ConversationProcessor(context, ui_manager) + + # Simulate max consecutive duplicates + processor._consecutive_duplicate_count = 4 # One below max + processor._max_consecutive_duplicates = 5 + + # Mock to always return the same tool call + context.client.create_completion = AsyncMock( + return_value={"response": "", "tool_calls": [tool_call.model_dump()]} + ) + + # Mock tool processor + processor.tool_processor.process_tool_calls = AsyncMock() + + # Set last signature to match current + with patch.object(processor, "_register_user_literals_from_history"): + await processor.process_conversation(max_turns=3) + + # Should have stopped due to duplicate detection or max_turns + # The test validates the mechanism exists + + @pytest.mark.asyncio + async def test_duplicate_detection_reset_on_different_args(self): + """Test that duplicate counter resets when args differ.""" + context = MockContext() + ui_manager = MockUIManager() + + processor = ConversationProcessor(context, ui_manager) + processor._consecutive_duplicate_count = 3 + + # Accessing internal state to test reset + # When a non-duplicate comes in, counter should reset to 0 + assert processor._consecutive_duplicate_count == 3 + + # After processing different tool call, would reset + processor._consecutive_duplicate_count = 0 + 
assert processor._consecutive_duplicate_count == 0 + + +class TestBudgetExhaustion: + """Tests for tool budget exhaustion scenarios.""" + + @pytest.mark.asyncio + async def test_discovery_budget_exhausted(self): + """Test handling of discovery budget exhaustion.""" + context = MockContext() + context.conversation_history = [ + Message(role=MessageRole.USER, content="Search for something") + ] + context.openai_tools = [] + + tool_call = ToolCall( + id="call_1", + type="function", + function=FunctionCall(name="search", arguments='{"query": "test"}'), + ) + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + ui_manager.stop_streaming_response = AsyncMock() + processor = ConversationProcessor(context, ui_manager) + + # Mock the tool state to indicate discovery budget exhausted + from chuk_ai_session_manager.guards import RunawayStatus + + mock_status = RunawayStatus( + should_stop=True, reason="Discovery budget exhausted", budget_exhausted=True + ) + mock_status_ok = RunawayStatus(should_stop=False) + + # Create a mock tool state manager + mock_tool_state = MagicMock() + mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.is_discovery_tool = MagicMock(return_value=True) + mock_tool_state.is_execution_tool = MagicMock(return_value=False) + + check_call_count = [0] + + def mock_check_runaway(tool_name=None): + check_call_count[0] += 1 + # First check is for discovery tool budget + if check_call_count[0] == 1: + return mock_status + return mock_status_ok + + mock_tool_state.check_runaway = MagicMock(side_effect=mock_check_runaway) + mock_tool_state.format_discovery_exhausted_message = MagicMock( + return_value="Discovery exhausted" + ) + + # Replace the tool state + processor._tool_state = mock_tool_state + + context.client.create_completion = AsyncMock( + side_effect=[ + {"response": "", "tool_calls": [tool_call.model_dump()]}, + {"response": "Final answer", 
"tool_calls": []}, + ] + ) + + await processor.process_conversation(max_turns=3) + + @pytest.mark.asyncio + async def test_execution_budget_exhausted(self): + """Test handling of execution budget exhaustion.""" + context = MockContext() + context.conversation_history = [ + Message(role=MessageRole.USER, content="Execute something") + ] + context.openai_tools = [] + + tool_call = ToolCall( + id="call_1", + type="function", + function=FunctionCall(name="execute", arguments="{}"), + ) + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + ui_manager.stop_streaming_response = AsyncMock() + processor = ConversationProcessor(context, ui_manager) + + from chuk_ai_session_manager.guards import RunawayStatus + + mock_status = RunawayStatus( + should_stop=True, reason="Execution budget exhausted", budget_exhausted=True + ) + mock_status_ok = RunawayStatus(should_stop=False) + + # Create a mock tool state manager + mock_tool_state = MagicMock() + mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.is_discovery_tool = MagicMock(return_value=False) + mock_tool_state.is_execution_tool = MagicMock(return_value=True) + + check_call_count = [0] + + def mock_check_runaway(tool_name=None): + check_call_count[0] += 1 + # First check (discovery) is OK, second check (execution) exhausted + if check_call_count[0] == 2: + return mock_status + return mock_status_ok + + mock_tool_state.check_runaway = MagicMock(side_effect=mock_check_runaway) + mock_tool_state.format_execution_exhausted_message = MagicMock( + return_value="Execution exhausted" + ) + + # Replace the tool state + processor._tool_state = mock_tool_state + + context.client.create_completion = AsyncMock( + side_effect=[ + {"response": "", "tool_calls": [tool_call.model_dump()]}, + {"response": "Done", "tool_calls": []}, + ] + ) + + await processor.process_conversation(max_turns=3) + + +class TestRunawayDetection: + 
"""Tests for general runaway detection.""" + + @pytest.mark.asyncio + async def test_runaway_with_saturation(self): + """Test runaway detection with saturation.""" + context = MockContext() + context.conversation_history = [ + Message(role=MessageRole.USER, content="Calculate") + ] + context.openai_tools = [] + + tool_call = ToolCall( + id="call_1", + type="function", + function=FunctionCall(name="compute", arguments="{}"), + ) + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + ui_manager.stop_streaming_response = AsyncMock() + processor = ConversationProcessor(context, ui_manager) + + from chuk_ai_session_manager.guards import RunawayStatus + + mock_status = RunawayStatus( + should_stop=True, + reason="Saturation detected", + saturation_detected=True, + message="Results have converged", + ) + mock_status_ok = RunawayStatus(should_stop=False) + + # Create a mock tool state manager + mock_tool_state = MagicMock() + mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.is_discovery_tool = MagicMock(return_value=False) + mock_tool_state.is_execution_tool = MagicMock(return_value=False) + mock_tool_state._recent_numeric_results = [3.14159] + + check_call_count = [0] + + def mock_check_runaway(tool_name=None): + check_call_count[0] += 1 + # Third call is general runaway check + if check_call_count[0] == 3: + return mock_status + return mock_status_ok + + mock_tool_state.check_runaway = MagicMock(side_effect=mock_check_runaway) + mock_tool_state.format_saturation_message = MagicMock( + return_value="Saturation message" + ) + + # Replace the tool state + processor._tool_state = mock_tool_state + + context.client.create_completion = AsyncMock( + side_effect=[ + {"response": "", "tool_calls": [tool_call.model_dump()]}, + {"response": "Final", "tool_calls": []}, + ] + ) + + await processor.process_conversation(max_turns=3) + + +class TestStreamingFallback: + 
"""Tests for streaming fallback to regular completion.""" + + @pytest.mark.asyncio + async def test_streaming_fallback_on_error(self): + """Test fallback to regular completion when streaming fails.""" + context = MockContext() + context.conversation_history = [Message(role=MessageRole.USER, content="Hello")] + context.openai_tools = [] + + ui_manager = MockUIManager() + ui_manager.start_streaming_response = AsyncMock() + ui_manager.stop_streaming_response = AsyncMock() + ui_manager.print_assistant_message = AsyncMock() + ui_manager.display = MagicMock() + ui_manager.is_streaming_response = False + + processor = ConversationProcessor(context, ui_manager) + + # Create mock tool state + mock_tool_state = MagicMock() + mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.extract_bindings_from_text = MagicMock(return_value=[]) + mock_tool_state.format_unused_warning = MagicMock(return_value=None) + processor._tool_state = mock_tool_state + + # Mock streaming to fail and regular to succeed + async def mock_streaming_fail(tools=None): + raise Exception("Streaming error") + + processor._handle_streaming_completion = mock_streaming_fail + + # But regular completion succeeds + context.client.create_completion = AsyncMock( + return_value={"response": "Fallback response", "tool_calls": []} + ) + + await processor.process_conversation(max_turns=1) + + # Should have fallen back to regular completion + context.client.create_completion.assert_called() + + +class TestConversationErrorHandling: + """Tests for error handling in conversation processing.""" + + @pytest.mark.asyncio + async def test_general_error_in_loop(self): + """Test error handling during conversation loop.""" + context = MockContext() + context.conversation_history = [Message(role=MessageRole.USER, content="Hello")] + context.openai_tools = [] + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + 
ui_manager.stop_streaming_response = AsyncMock() + + processor = ConversationProcessor(context, ui_manager) + + # Mock client to raise an error + context.client.create_completion = AsyncMock( + side_effect=ValueError("Some unexpected error") + ) + + await processor.process_conversation(max_turns=1) + + # Should have added error message to history + assert len(context.conversation_history) >= 2 + last_msg = context.conversation_history[-1] + # Error message may be a Message object or a string depending on the error path + content = last_msg.content if hasattr(last_msg, "content") else str(last_msg) + assert "error" in content.lower() + + @pytest.mark.asyncio + async def test_cancelled_error_propagates(self): + """Test that asyncio.CancelledError is re-raised.""" + import asyncio + + context = MockContext() + context.conversation_history = [Message(role=MessageRole.USER, content="Hello")] + context.openai_tools = [] + + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + context.client.create_completion = AsyncMock( + side_effect=asyncio.CancelledError() + ) + + with pytest.raises(asyncio.CancelledError): + await processor.process_conversation(max_turns=1) + + +class TestInspectionHandling: + """Tests for signature inspection edge cases.""" + + @pytest.mark.asyncio + async def test_inspection_error_disables_streaming(self): + """Test that inspection errors disable streaming gracefully.""" + context = MockContext() + context.conversation_history = [Message(role=MessageRole.USER, content="Hello")] + context.openai_tools = [] + + # Create a client that has create_completion but inspection fails + mock_client = MagicMock() + mock_client.create_completion = AsyncMock( + return_value={"response": "Hi!", "tool_calls": None} + ) + + context.client = mock_client + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + ui_manager.print_assistant_message = AsyncMock() + + processor = ConversationProcessor(context, 
ui_manager) + + # Mock inspect.signature to raise + with patch("inspect.signature", side_effect=ValueError("Cannot inspect")): + await processor.process_conversation(max_turns=1) + + # Should complete without streaming + mock_client.create_completion.assert_called() + + +class TestBindingExtraction: + """Tests for value binding extraction from responses.""" + + @pytest.mark.asyncio + async def test_extracts_bindings_from_response(self): + """Test that value bindings are extracted from assistant responses.""" + context = MockContext() + context.conversation_history = [ + Message(role=MessageRole.USER, content="Calculate") + ] + context.openai_tools = [] + + context.client.create_completion = AsyncMock( + return_value={ + "response": "The result is σ = 5.0 and π = 3.14159", + "tool_calls": [], # Empty list, not None + } + ) + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + ui_manager.print_assistant_message = AsyncMock() + + processor = ConversationProcessor(context, ui_manager) + + # Create mock tool state with binding extraction + mock_tool_state = MagicMock() + mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.format_unused_warning = MagicMock(return_value=None) + mock_extract = MagicMock(return_value=[]) + mock_tool_state.extract_bindings_from_text = mock_extract + processor._tool_state = mock_tool_state + + await processor.process_conversation(max_turns=1) + + # Should have tried to extract bindings + mock_extract.assert_called_once() + + +class TestStreamingCleanup: + """Tests for streaming handler cleanup.""" + + @pytest.mark.asyncio + async def test_streaming_handler_cleared_after_response(self): + """Test that streaming handler reference is cleared after streaming response.""" + context = MockContext() + context.conversation_history = [Message(role=MessageRole.USER, content="Hello")] + context.openai_tools = [] + + ui_manager = MockUIManager() 
+ ui_manager.is_streaming_response = False + ui_manager.streaming_handler = MagicMock() # Simulate existing handler + ui_manager.print_assistant_message = AsyncMock() + + processor = ConversationProcessor(context, ui_manager) + + # Create mock tool state + mock_tool_state = MagicMock() + mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.extract_bindings_from_text = MagicMock(return_value=[]) + mock_tool_state.format_unused_warning = MagicMock(return_value=None) + processor._tool_state = mock_tool_state + + # Mock _handle_streaming_completion to return a streaming response + async def mock_streaming_completion(tools=None): + return CompletionResponse( + response="Hi!", + tool_calls=[], + streaming=True, + elapsed_time=0.5, + ) + + processor._handle_streaming_completion = mock_streaming_completion + + await processor.process_conversation(max_turns=1) + + # Handler should be cleared after streaming response + assert ui_manager.streaming_handler is None + + +class TestMaxTurnsWithToolCalls: + """Tests for max_turns limit with tool calls.""" + + @pytest.mark.asyncio + async def test_max_turns_stops_tool_loop(self): + """Test that max_turns limit stops infinite tool call loops.""" + context = MockContext() + context.conversation_history = [ + Message(role=MessageRole.USER, content="Keep calling tools") + ] + context.openai_tools = [{"type": "function", "function": {"name": "loop"}}] + context.tool_name_mapping = {} + + # Always return tool calls - would loop forever without max_turns + async def infinite_tool_calls(**kwargs): + return { + "response": "", + "tool_calls": [ + { + "id": "call_1", + "type": "function", + "function": {"name": "loop", "arguments": "{}"}, + } + ], + } + + context.client.create_completion = infinite_tool_calls + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + ui_manager.stop_streaming_response = AsyncMock() + + processor = 
ConversationProcessor(context, ui_manager) + + # Create mock tool state + mock_tool_state = MagicMock() + mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.is_discovery_tool = MagicMock(return_value=False) + mock_tool_state.is_execution_tool = MagicMock(return_value=False) + from chuk_ai_session_manager.guards import RunawayStatus + + mock_tool_state.check_runaway = MagicMock( + return_value=RunawayStatus(should_stop=False) + ) + processor._tool_state = mock_tool_state + + # Mock tool processor + processor.tool_processor.process_tool_calls = AsyncMock() + + # Set very low max_turns + await processor.process_conversation(max_turns=2) + + # Should have stopped after max_turns + assert processor.tool_processor.process_tool_calls.call_count <= 2 + + +class TestGeneralRunawayDetection: + """Tests for general runaway detection scenarios.""" + + @pytest.mark.asyncio + async def test_runaway_with_budget_exhausted(self): + """Test runaway detection with budget exhausted.""" + context = MockContext() + context.conversation_history = [ + Message(role=MessageRole.USER, content="Calculate") + ] + context.openai_tools = [{"type": "function", "function": {"name": "compute"}}] + context.tool_name_mapping = {} + + tool_call = ToolCall( + id="call_1", + type="function", + function=FunctionCall(name="compute", arguments="{}"), + ) + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + ui_manager.stop_streaming_response = AsyncMock() + ui_manager.print_assistant_message = AsyncMock() + processor = ConversationProcessor(context, ui_manager) + + from chuk_ai_session_manager.guards import RunawayStatus + + mock_status = RunawayStatus( + should_stop=True, + reason="Budget exhausted", + budget_exhausted=True, + message="Tool call budget exhausted", + ) + mock_status_ok = RunawayStatus(should_stop=False) + + # Create a mock tool state manager + mock_tool_state = MagicMock() + 
mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.is_discovery_tool = MagicMock(return_value=False) + mock_tool_state.is_execution_tool = MagicMock(return_value=False) + mock_tool_state.extract_bindings_from_text = MagicMock(return_value=[]) + mock_tool_state.format_unused_warning = MagicMock(return_value=None) + + check_call_count = [0] + + def mock_check_runaway(tool_name=None): + check_call_count[0] += 1 + # General runaway check is after discovery and execution checks + # Discovery check returns should_stop=False + # Execution check returns should_stop=False + # General runaway check (3rd call) returns should_stop=True + if check_call_count[0] >= 3: + return mock_status + return mock_status_ok + + mock_tool_state.check_runaway = MagicMock(side_effect=mock_check_runaway) + mock_tool_state.format_budget_exhausted_message = MagicMock( + return_value="Budget exhausted message" + ) + mock_tool_state.format_state_for_model = MagicMock(return_value="State summary") + + # Replace the tool state + processor._tool_state = mock_tool_state + + context.client.create_completion = AsyncMock( + side_effect=[ + {"response": "", "tool_calls": [tool_call.model_dump()]}, + {"response": "Final", "tool_calls": []}, + ] + ) + + await processor.process_conversation(max_turns=3) + + # The test validates that runaway detection mechanism is called + # The exact behavior depends on the tool state configuration + assert check_call_count[0] >= 1 # check_runaway was called + + +class TestDuplicateToolCallHandling: + """Tests for duplicate tool call handling with state injection.""" + + @pytest.mark.asyncio + async def test_duplicate_triggers_state_injection(self): + """Test that duplicate tool calls trigger state summary injection.""" + context = MockContext() + context.conversation_history = [ + Message(role=MessageRole.USER, content="Calculate") + ] + context.openai_tools = [{"type": "function", 
"function": {"name": "sqrt"}}] + context.tool_name_mapping = {} + + tool_call = ToolCall( + id="call_1", + type="function", + function=FunctionCall(name="sqrt", arguments='{"x": 16}'), + ) + + # Return same tool call multiple times, then final response + call_count = [0] + + async def repeated_tool_calls(**kwargs): + call_count[0] += 1 + if call_count[0] <= 3: # First 3 calls return same tool + return {"response": "", "tool_calls": [tool_call.model_dump()]} + else: + return {"response": "Done", "tool_calls": []} + + context.client.create_completion = repeated_tool_calls + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + ui_manager.stop_streaming_response = AsyncMock() + ui_manager.print_assistant_message = AsyncMock() + + processor = ConversationProcessor(context, ui_manager) + + # Create mock tool state + mock_tool_state = MagicMock() + mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.is_discovery_tool = MagicMock(return_value=False) + mock_tool_state.is_execution_tool = MagicMock(return_value=False) + mock_tool_state.extract_bindings_from_text = MagicMock(return_value=[]) + mock_tool_state.format_unused_warning = MagicMock(return_value=None) + mock_tool_state.format_state_for_model = MagicMock(return_value="State: v0=4.0") + from chuk_ai_session_manager.guards import RunawayStatus + + mock_tool_state.check_runaway = MagicMock( + return_value=RunawayStatus(should_stop=False) + ) + processor._tool_state = mock_tool_state + + # Mock tool processor + processor.tool_processor.process_tool_calls = AsyncMock() + + await processor.process_conversation(max_turns=10) + + # Should have injected state summary (called format_state_for_model) + # after detecting duplicate + assert mock_tool_state.format_state_for_model.call_count >= 1 + + +class TestReasoningContent: + """Tests for reasoning content handling.""" + + @pytest.mark.asyncio + async def 
test_reasoning_content_preserved(self): + """Test that reasoning content from response is preserved.""" + context = MockContext() + context.conversation_history = [ + Message(role=MessageRole.USER, content="Think about this") + ] + context.openai_tools = [] + + context.client.create_completion = AsyncMock( + return_value={ + "response": "The answer is 42", + "tool_calls": [], + "reasoning_content": "Let me think step by step...", + } + ) + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + ui_manager.print_assistant_message = AsyncMock() + + processor = ConversationProcessor(context, ui_manager) + + # Create mock tool state + mock_tool_state = MagicMock() + mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.extract_bindings_from_text = MagicMock(return_value=[]) + mock_tool_state.format_unused_warning = MagicMock(return_value=None) + processor._tool_state = mock_tool_state + + await processor.process_conversation(max_turns=1) + + # Check that reasoning content was added to the message + last_msg = context.conversation_history[-1] + assert last_msg.role == MessageRole.ASSISTANT + assert hasattr(last_msg, "reasoning_content") or "reasoning" in str(last_msg) + + +class TestMaxDuplicatesExceeded: + """Tests for max duplicates safety valve.""" + + @pytest.mark.asyncio + async def test_max_duplicates_breaks_loop(self): + """Test that hitting max consecutive duplicates breaks the loop.""" + context = MockContext() + context.conversation_history = [ + Message(role=MessageRole.USER, content="Calculate") + ] + context.openai_tools = [{"type": "function", "function": {"name": "sqrt"}}] + context.tool_name_mapping = {} + + tool_call = ToolCall( + id="call_1", + type="function", + function=FunctionCall(name="sqrt", arguments='{"x": 16}'), + ) + + # Always return the same tool call + context.client.create_completion = AsyncMock( + return_value={"response": "", 
"tool_calls": [tool_call.model_dump()]} + ) + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + ui_manager.stop_streaming_response = AsyncMock() + + processor = ConversationProcessor(context, ui_manager) + processor._max_consecutive_duplicates = 3 # Lower threshold for testing + + # Create mock tool state + mock_tool_state = MagicMock() + mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.is_discovery_tool = MagicMock(return_value=False) + mock_tool_state.is_execution_tool = MagicMock(return_value=False) + mock_tool_state.format_state_for_model = MagicMock(return_value="") + from chuk_ai_session_manager.guards import RunawayStatus + + mock_tool_state.check_runaway = MagicMock( + return_value=RunawayStatus(should_stop=False) + ) + processor._tool_state = mock_tool_state + + # Mock tool processor + processor.tool_processor.process_tool_calls = AsyncMock() + + await processor.process_conversation(max_turns=20) + + # Should have detected duplicates and eventually broken out + # The exact count depends on implementation but should be limited + assert processor.tool_processor.process_tool_calls.call_count <= 10 + + +class TestDiscoveryBudgetWithMessage: + """Tests for discovery budget with formatted message.""" + + @pytest.mark.asyncio + async def test_discovery_budget_formats_message(self): + """Test that discovery budget exhaustion formats proper message.""" + context = MockContext() + context.conversation_history = [ + Message(role=MessageRole.USER, content="Search") + ] + context.openai_tools = [{"type": "function", "function": {"name": "search"}}] + context.tool_name_mapping = {} + + tool_call = ToolCall( + id="call_1", + type="function", + function=FunctionCall(name="search", arguments="{}"), + ) + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + ui_manager.stop_streaming_response = AsyncMock() + 
ui_manager.print_assistant_message = AsyncMock() + + processor = ConversationProcessor(context, ui_manager) + + from chuk_ai_session_manager.guards import RunawayStatus + + # Discovery budget exhausted with "Discovery" in reason + mock_status = RunawayStatus( + should_stop=True, + reason="Discovery budget exhausted", + budget_exhausted=True, + ) + mock_status_ok = RunawayStatus(should_stop=False) + + mock_tool_state = MagicMock() + mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.is_discovery_tool = MagicMock(return_value=True) + mock_tool_state.is_execution_tool = MagicMock(return_value=False) + mock_tool_state.extract_bindings_from_text = MagicMock(return_value=[]) + mock_tool_state.format_unused_warning = MagicMock(return_value=None) + mock_tool_state.format_discovery_exhausted_message = MagicMock( + return_value="Discovery budget exhausted - please answer with available data" + ) + + # First check returns discovery exhausted + call_count = [0] + + def mock_check(tool_name=None): + call_count[0] += 1 + if call_count[0] == 1: + return mock_status + return mock_status_ok + + mock_tool_state.check_runaway = MagicMock(side_effect=mock_check) + processor._tool_state = mock_tool_state + + context.client.create_completion = AsyncMock( + side_effect=[ + {"response": "", "tool_calls": [tool_call.model_dump()]}, + {"response": "Here's my answer", "tool_calls": []}, + ] + ) + + await processor.process_conversation(max_turns=3) + + # Should have called format_discovery_exhausted_message + mock_tool_state.format_discovery_exhausted_message.assert_called() + + +class TestExecutionBudgetWithMessage: + """Tests for execution budget with formatted message.""" + + @pytest.mark.asyncio + async def test_execution_budget_formats_message(self): + """Test that execution budget exhaustion formats proper message.""" + context = MockContext() + context.conversation_history = [ + 
Message(role=MessageRole.USER, content="Execute") + ] + context.openai_tools = [{"type": "function", "function": {"name": "execute"}}] + context.tool_name_mapping = {} + + tool_call = ToolCall( + id="call_1", + type="function", + function=FunctionCall(name="execute", arguments="{}"), + ) + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + ui_manager.stop_streaming_response = AsyncMock() + ui_manager.print_assistant_message = AsyncMock() + + processor = ConversationProcessor(context, ui_manager) + + from chuk_ai_session_manager.guards import RunawayStatus + + # Execution budget exhausted with "Execution" in reason + # The check passes tool name to check_runaway for execution tools + mock_status_exec = RunawayStatus( + should_stop=True, + reason="Execution budget exhausted", + budget_exhausted=True, + ) + mock_status_ok = RunawayStatus(should_stop=False) + + mock_tool_state = MagicMock() + mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.is_discovery_tool = MagicMock(return_value=False) + mock_tool_state.is_execution_tool = MagicMock(return_value=True) + mock_tool_state.extract_bindings_from_text = MagicMock(return_value=[]) + mock_tool_state.format_unused_warning = MagicMock(return_value=None) + mock_tool_state.format_execution_exhausted_message = MagicMock( + return_value="Execution budget exhausted - please provide final answer" + ) + + # check_runaway is called with tool_name for specific checks + # When called with a tool name (execution tool), return exhausted + def mock_check(tool_name=None): + if tool_name is not None: + # This is the execution budget check + return mock_status_exec + return mock_status_ok + + mock_tool_state.check_runaway = MagicMock(side_effect=mock_check) + processor._tool_state = mock_tool_state + + context.client.create_completion = AsyncMock( + side_effect=[ + {"response": "", "tool_calls": [tool_call.model_dump()]}, + 
{"response": "Done", "tool_calls": []}, + ] + ) + + await processor.process_conversation(max_turns=3) + + # Should have called format_execution_exhausted_message + mock_tool_state.format_execution_exhausted_message.assert_called() + + +class TestPollingToolDetection: + """Tests for polling tool detection and loop exemption.""" + + def test_is_polling_tool_status(self): + """Test that 'status' tools are detected as polling tools.""" + context = MockContext() + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + assert processor._is_polling_tool("render_status") is True + assert processor._is_polling_tool("get_status") is True + assert processor._is_polling_tool("remotion_render_status") is True + assert processor._is_polling_tool("job_status_check") is True + + def test_is_polling_tool_progress(self): + """Test that 'progress' tools are detected as polling tools.""" + context = MockContext() + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + assert processor._is_polling_tool("check_progress") is True + assert processor._is_polling_tool("get_progress") is True + assert processor._is_polling_tool("render_progress") is True + + def test_is_polling_tool_check(self): + """Test that 'check' tools are detected as polling tools.""" + context = MockContext() + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + assert processor._is_polling_tool("health_check") is True + assert processor._is_polling_tool("check_job") is True + + def test_is_polling_tool_poll(self): + """Test that 'poll' tools are detected as polling tools.""" + context = MockContext() + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + assert processor._is_polling_tool("poll_results") is True + assert processor._is_polling_tool("poll_queue") is True + + def test_is_polling_tool_monitor(self): + """Test that 'monitor' tools are detected as polling 
tools.""" + context = MockContext() + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + assert processor._is_polling_tool("monitor_job") is True + assert processor._is_polling_tool("system_monitor") is True + + def test_is_polling_tool_watch(self): + """Test that 'watch' tools are detected as polling tools.""" + context = MockContext() + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + assert processor._is_polling_tool("watch_progress") is True + assert processor._is_polling_tool("file_watch") is True + + def test_is_polling_tool_wait(self): + """Test that 'wait' tools are detected as polling tools.""" + context = MockContext() + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + assert processor._is_polling_tool("wait_for_completion") is True + assert processor._is_polling_tool("wait_job") is True + + def test_is_polling_tool_state(self): + """Test that 'state' tools are detected as polling tools.""" + context = MockContext() + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + assert processor._is_polling_tool("get_state") is True + assert processor._is_polling_tool("job_state") is True + + def test_is_not_polling_tool(self): + """Test that non-polling tools are not detected as polling tools.""" + context = MockContext() + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + assert processor._is_polling_tool("sqrt") is False + assert processor._is_polling_tool("add") is False + assert processor._is_polling_tool("create_video") is False + assert ( + processor._is_polling_tool("render_video") is False + ) # render but not status + assert processor._is_polling_tool("calculate") is False + assert processor._is_polling_tool("search") is False + + def test_is_polling_tool_case_insensitive(self): + """Test that polling tool detection is case-insensitive.""" + context = 
MockContext() + ui_manager = MockUIManager() + processor = ConversationProcessor(context, ui_manager) + + assert processor._is_polling_tool("GET_STATUS") is True + assert processor._is_polling_tool("Check_Progress") is True + assert processor._is_polling_tool("POLL_RESULTS") is True + + @pytest.mark.asyncio + async def test_polling_tool_not_marked_as_duplicate(self): + """Test that polling tools calling same args are not marked as duplicates.""" + context = MockContext() + context.conversation_history = [ + Message(role=MessageRole.USER, content="Check the status") + ] + context.openai_tools = [ + {"type": "function", "function": {"name": "render_status"}} + ] + context.tool_name_mapping = {} + + # Same status check tool call + tool_call = ToolCall( + id="call_1", + type="function", + function=FunctionCall( + name="render_status", arguments='{"job_id": "abc123"}' + ), + ) + + # Return same tool call multiple times, then final response + call_count = [0] + + async def repeated_status_checks(**kwargs): + call_count[0] += 1 + if call_count[0] <= 3: # First 3 calls return same status check + return {"response": "", "tool_calls": [tool_call.model_dump()]} + else: + return {"response": "Render complete!", "tool_calls": []} + + context.client.create_completion = repeated_status_checks + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + ui_manager.stop_streaming_response = AsyncMock() + ui_manager.print_assistant_message = AsyncMock() + + processor = ConversationProcessor(context, ui_manager) + + # Create mock tool state + mock_tool_state = MagicMock() + mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.is_discovery_tool = MagicMock(return_value=False) + mock_tool_state.is_execution_tool = MagicMock(return_value=False) + mock_tool_state.extract_bindings_from_text = MagicMock(return_value=[]) + mock_tool_state.format_unused_warning = MagicMock(return_value=None) 
+ mock_tool_state.format_state_for_model = MagicMock(return_value="") + from chuk_ai_session_manager.guards import RunawayStatus + + mock_tool_state.check_runaway = MagicMock( + return_value=RunawayStatus(should_stop=False) + ) + processor._tool_state = mock_tool_state + + # Mock tool processor + processor.tool_processor.process_tool_calls = AsyncMock() + + await processor.process_conversation(max_turns=10) + + # All 3 status checks should have been processed (not skipped as duplicates) + assert processor.tool_processor.process_tool_calls.call_count >= 3 + # Duplicate counter should not have incremented for polling tools + assert processor._consecutive_duplicate_count == 0 + + +class TestBindingExtractionWithResults: + """Tests for binding extraction returning actual bindings.""" + + @pytest.mark.asyncio + async def test_extracts_bindings_with_values(self): + """Test that actual bindings are extracted and logged.""" + context = MockContext() + context.conversation_history = [ + Message(role=MessageRole.USER, content="Calculate") + ] + context.openai_tools = [] + + context.client.create_completion = AsyncMock( + return_value={ + "response": "The result is σ = 5.0", + "tool_calls": [], + } + ) + + ui_manager = MockUIManager() + ui_manager.is_streaming_response = False + ui_manager.print_assistant_message = AsyncMock() + + processor = ConversationProcessor(context, ui_manager) + + # Create mock binding + mock_binding = MagicMock() + mock_binding.id = "v0" + mock_binding.raw_value = 5.0 + mock_binding.aliases = ["σ"] + + # Create mock tool state that returns a binding + mock_tool_state = MagicMock() + mock_tool_state.reset_for_new_prompt = MagicMock() + mock_tool_state.register_user_literals = MagicMock(return_value=0) + mock_tool_state.extract_bindings_from_text = MagicMock( + return_value=[mock_binding] + ) + mock_tool_state.format_unused_warning = MagicMock(return_value=None) + processor._tool_state = mock_tool_state + + await 
processor.process_conversation(max_turns=1) + + # Should have called extract_bindings_from_text + mock_tool_state.extract_bindings_from_text.assert_called_once() diff --git a/tests/chat/test_init.py b/tests/chat/test_init.py new file mode 100644 index 00000000..81512fa8 --- /dev/null +++ b/tests/chat/test_init.py @@ -0,0 +1,29 @@ +# tests/chat/test_init.py +"""Tests for chat/__init__.py lazy import.""" + +import pytest + + +class TestChatModuleLazyImport: + """Tests for lazy import in chat module.""" + + def test_getattr_handle_chat_mode(self): + """Test lazy import of handle_chat_mode.""" + from mcp_cli import chat + + # Access the lazy-loaded attribute + func = chat.handle_chat_mode + assert callable(func) + + def test_getattr_invalid_attribute(self): + """Test AttributeError for invalid attribute.""" + from mcp_cli import chat + + with pytest.raises(AttributeError, match="has no attribute"): + _ = chat.nonexistent_attribute + + def test_all_exports(self): + """Test __all__ exports.""" + from mcp_cli import chat + + assert "handle_chat_mode" in chat.__all__ diff --git a/tests/chat/test_models.py b/tests/chat/test_models.py new file mode 100644 index 00000000..92457019 --- /dev/null +++ b/tests/chat/test_models.py @@ -0,0 +1,301 @@ +# tests/chat/test_models.py +"""Tests for chat/models.py.""" + +import json + +from mcp_cli.chat.models import ( + ChatStatus, + FunctionCall, + Message, + MessageField, + MessageRole, + ToolCallData, + ToolCallField, + ToolExecutionRecord, + ToolExecutionState, +) + + +class TestMessageRole: + """Tests for MessageRole enum.""" + + def test_values(self): + """Test enum values.""" + assert MessageRole.USER == "user" + assert MessageRole.ASSISTANT == "assistant" + assert MessageRole.SYSTEM == "system" + assert MessageRole.TOOL == "tool" + + +class TestMessageField: + """Tests for MessageField enum.""" + + def test_values(self): + """Test enum values.""" + assert MessageField.ROLE == "role" + assert MessageField.CONTENT == "content" + 
assert MessageField.TOOL_CALLS == "tool_calls" + + +class TestToolCallField: + """Tests for ToolCallField enum.""" + + def test_values(self): + """Test enum values.""" + assert ToolCallField.ID == "id" + assert ToolCallField.TYPE == "type" + assert ToolCallField.FUNCTION == "function" + assert ToolCallField.NAME == "name" + assert ToolCallField.ARGUMENTS == "arguments" + + +class TestFunctionCall: + """Tests for FunctionCall model.""" + + def test_create(self): + """Test creating a FunctionCall.""" + fc = FunctionCall(name="sqrt", arguments='{"x": 18}') + assert fc.name == "sqrt" + assert fc.arguments == '{"x": 18}' + + def test_get_arguments_dict(self): + """Test parsing arguments to dict.""" + fc = FunctionCall(name="sqrt", arguments='{"x": 18}') + args = fc.get_arguments_dict() + assert args == {"x": 18} + + def test_get_arguments_dict_invalid_json(self): + """Test get_arguments_dict with invalid JSON.""" + fc = FunctionCall(name="sqrt", arguments="not-json") + args = fc.get_arguments_dict() + assert args == {} + + def test_get_arguments_dict_non_dict(self): + """Test get_arguments_dict with non-dict JSON.""" + fc = FunctionCall(name="sqrt", arguments="[1, 2, 3]") + args = fc.get_arguments_dict() + assert args == {} + + def test_from_dict_args(self): + """Test creating from dict arguments.""" + fc = FunctionCall.from_dict_args("sqrt", {"x": 18}) + assert fc.name == "sqrt" + assert json.loads(fc.arguments) == {"x": 18} + + +class TestToolCallData: + """Tests for ToolCallData model.""" + + def test_create(self): + """Test creating ToolCallData.""" + fc = FunctionCall(name="sqrt", arguments='{"x": 18}') + tc = ToolCallData(id="call_123", type="function", function=fc) + assert tc.id == "call_123" + assert tc.type == "function" + assert tc.function.name == "sqrt" + + def test_to_dict(self): + """Test converting to dict.""" + fc = FunctionCall(name="sqrt", arguments='{"x": 18}') + tc = ToolCallData(id="call_123", type="function", function=fc) + d = tc.to_dict() + 
assert d["id"] == "call_123" + assert d["type"] == "function" + assert d["function"]["name"] == "sqrt" + + def test_from_dict(self): + """Test creating from dict.""" + data = { + "id": "call_123", + "type": "function", + "index": 0, + "function": { + "name": "sqrt", + "arguments": '{"x": 18}', + }, + } + tc = ToolCallData.from_dict(data) + assert tc.id == "call_123" + assert tc.function.name == "sqrt" + + def test_from_dict_defaults(self): + """Test from_dict with missing fields.""" + data = {} + tc = ToolCallData.from_dict(data) + assert tc.id == "" + assert tc.type == "function" + assert tc.index == 0 + + def test_merge_chunk_name(self): + """Test merging chunk with name.""" + fc1 = FunctionCall(name="", arguments="") + tc1 = ToolCallData(id="call_123", function=fc1) + + fc2 = FunctionCall(name="sqrt", arguments="") + tc2 = ToolCallData(id="call_123", function=fc2) + + tc1.merge_chunk(tc2) + assert tc1.function.name == "sqrt" + + def test_merge_chunk_arguments(self): + """Test merging chunk with arguments.""" + fc1 = FunctionCall(name="sqrt", arguments='{"x":') + tc1 = ToolCallData(id="call_123", function=fc1) + + fc2 = FunctionCall(name="", arguments=" 18}") + tc2 = ToolCallData(id="call_123", function=fc2) + + tc1.merge_chunk(tc2) + assert tc1.function.arguments == '{"x": 18}' + + +class TestMessage: + """Tests for Message model.""" + + def test_create_user_message(self): + """Test creating a user message.""" + msg = Message(role=MessageRole.USER, content="Hello") + assert msg.role == MessageRole.USER + assert msg.content == "Hello" + + def test_to_dict_simple(self): + """Test to_dict with simple message.""" + msg = Message(role=MessageRole.USER, content="Hello") + d = msg.to_dict() + assert d["role"] == "user" + assert d["content"] == "Hello" + + def test_to_dict_assistant_with_tool_calls(self): + """Test to_dict ensures content for assistant with tool_calls.""" + msg = Message( + role=MessageRole.ASSISTANT, + tool_calls=[{"id": "call_1", "type": "function", 
"function": {}}], + ) + d = msg.to_dict() + # Should have content field even if None + assert "content" in d + assert d["content"] is None + + def test_to_dict_with_reasoning_content(self): + """Test to_dict includes reasoning_content when set.""" + msg = Message( + role=MessageRole.ASSISTANT, + content="Answer", + reasoning_content="I thought about this...", + ) + d = msg.to_dict() + assert d["reasoning_content"] == "I thought about this..." + + def test_from_dict(self): + """Test creating from dict.""" + data = {"role": "user", "content": "Hello"} + msg = Message.from_dict(data) + assert msg.role == MessageRole.USER + assert msg.content == "Hello" + + def test_get_tool_calls_typed(self): + """Test get_tool_calls_typed.""" + msg = Message( + role=MessageRole.ASSISTANT, + tool_calls=[ + { + "id": "call_1", + "type": "function", + "function": {"name": "sqrt", "arguments": "{}"}, + } + ], + ) + typed = msg.get_tool_calls_typed() + assert len(typed) == 1 + assert typed[0].id == "call_1" + assert typed[0].function.name == "sqrt" + + def test_get_tool_calls_typed_empty(self): + """Test get_tool_calls_typed with no tool calls.""" + msg = Message(role=MessageRole.USER, content="Hello") + typed = msg.get_tool_calls_typed() + assert typed == [] + + def test_with_tool_calls(self): + """Test creating message with typed tool calls.""" + fc = FunctionCall(name="sqrt", arguments='{"x": 18}') + tc = ToolCallData(id="call_1", function=fc) + + msg = Message.with_tool_calls( + role=MessageRole.ASSISTANT, tool_calls=[tc], content="Calling sqrt" + ) + assert msg.role == MessageRole.ASSISTANT + assert msg.content == "Calling sqrt" + assert len(msg.tool_calls) == 1 + + +class TestToolExecutionRecord: + """Tests for ToolExecutionRecord model.""" + + def test_create(self): + """Test creating a record.""" + record = ToolExecutionRecord( + tool_name="sqrt", + arguments={"x": 18}, + result=4.2426, + ) + assert record.tool_name == "sqrt" + assert record.result == 4.2426 + + def 
test_to_dict(self): + """Test to_dict excludes None fields.""" + record = ToolExecutionRecord(tool_name="sqrt", result=4.2426) + d = record.to_dict() + assert "tool_name" in d + assert "error" not in d # None excluded + + def test_from_dict(self): + """Test creating from dict.""" + data = { + "tool_name": "sqrt", + "arguments": {"x": 18}, + "result": 4.2426, + } + record = ToolExecutionRecord.from_dict(data) + assert record.tool_name == "sqrt" + assert record.result == 4.2426 + + +class TestToolExecutionState: + """Tests for ToolExecutionState model.""" + + def test_create(self): + """Test creating state.""" + state = ToolExecutionState(name="sqrt", arguments={"x": 18}, start_time=1000.0) + assert state.name == "sqrt" + assert state.start_time == 1000.0 + assert state.completed is False + + def test_elapsed_time(self): + """Test elapsed_time calculation.""" + state = ToolExecutionState(name="sqrt", arguments={}, start_time=1000.0) + elapsed = state.elapsed_time(1005.0) + assert elapsed == 5.0 + + +class TestChatStatus: + """Tests for ChatStatus model.""" + + def test_create(self): + """Test creating status.""" + status = ChatStatus( + provider="openai", + model="gpt-4", + tool_count=10, + server_count=2, + ) + assert status.provider == "openai" + assert status.model == "gpt-4" + assert status.tool_count == 10 + + def test_to_dict(self): + """Test to_dict.""" + status = ChatStatus(provider="openai", model="gpt-4") + d = status.to_dict() + assert d["provider"] == "openai" + assert d["model"] == "gpt-4" diff --git a/tests/chat/test_response_models.py b/tests/chat/test_response_models.py new file mode 100644 index 00000000..0b9811a8 --- /dev/null +++ b/tests/chat/test_response_models.py @@ -0,0 +1,181 @@ +# tests/chat/test_response_models.py +"""Tests for chat/response_models.py.""" + +from mcp_cli.chat.response_models import ( + CompletionResponse, + FunctionCall, + Message, + MessageField, + MessageRole, + ToolCall, + convert_messages_to_dicts, + 
convert_messages_to_models, +) + + +class TestMessageField: + """Tests for MessageField enum.""" + + def test_values(self): + """Test enum values.""" + assert MessageField.ROLE == "role" + assert MessageField.CONTENT == "content" + assert MessageField.TOOL_CALLS == "tool_calls" + assert MessageField.TOOL_CALL_ID == "tool_call_id" + assert MessageField.NAME == "name" + + +class TestCompletionResponse: + """Tests for CompletionResponse model.""" + + def test_create_simple(self): + """Test creating a simple response.""" + resp = CompletionResponse(response="Hello!") + assert resp.response == "Hello!" + assert resp.tool_calls == [] + assert resp.reasoning_content is None + + def test_create_with_tool_calls(self): + """Test creating response with tool calls.""" + tool_call = ToolCall( + id="call_1", + type="function", + function=FunctionCall(name="sqrt", arguments='{"x": 18}'), + ) + resp = CompletionResponse(response="", tool_calls=[tool_call]) + assert len(resp.tool_calls) == 1 + assert resp.tool_calls[0].function.name == "sqrt" + + def test_from_dict_simple(self): + """Test from_dict with simple response.""" + data = {"response": "Hello!", "chunks_received": 5} + resp = CompletionResponse.from_dict(data) + assert resp.response == "Hello!" 
+ assert resp.chunks_received == 5 + + def test_from_dict_with_tool_calls_as_dicts(self): + """Test from_dict with tool calls as dicts.""" + data = { + "response": "", + "tool_calls": [ + { + "id": "call_1", + "type": "function", + "function": {"name": "sqrt", "arguments": '{"x": 18}'}, + } + ], + } + resp = CompletionResponse.from_dict(data) + assert len(resp.tool_calls) == 1 + assert resp.tool_calls[0].function.name == "sqrt" + + def test_from_dict_with_tool_calls_as_models(self): + """Test from_dict with tool calls already as ToolCall models.""" + tool_call = ToolCall( + id="call_1", + type="function", + function=FunctionCall(name="sqrt", arguments='{"x": 18}'), + ) + data = {"response": "", "tool_calls": [tool_call]} + resp = CompletionResponse.from_dict(data) + assert len(resp.tool_calls) == 1 + assert resp.tool_calls[0].function.name == "sqrt" + + def test_from_dict_empty_tool_calls(self): + """Test from_dict with empty tool_calls list.""" + data = {"response": "Hi", "tool_calls": []} + resp = CompletionResponse.from_dict(data) + assert resp.tool_calls == [] + + def test_from_dict_defaults(self): + """Test from_dict with missing fields uses defaults.""" + data = {} + resp = CompletionResponse.from_dict(data) + assert resp.response == "" + assert resp.tool_calls == [] + assert resp.chunks_received == 0 + assert resp.elapsed_time == 0.0 + assert resp.interrupted is False + assert resp.streaming is False + + def test_to_dict(self): + """Test to_dict serialization.""" + tool_call = ToolCall( + id="call_1", + type="function", + function=FunctionCall(name="sqrt", arguments='{"x": 18}'), + ) + resp = CompletionResponse( + response="Result", + tool_calls=[tool_call], + reasoning_content="Thinking...", + chunks_received=10, + elapsed_time=1.5, + interrupted=False, + streaming=True, + ) + d = resp.to_dict() + assert d["response"] == "Result" + assert len(d["tool_calls"]) == 1 + assert d["reasoning_content"] == "Thinking..." 
+ assert d["chunks_received"] == 10 + assert d["elapsed_time"] == 1.5 + assert d["streaming"] is True + + def test_has_tool_calls_true(self): + """Test has_tool_calls property when True.""" + tool_call = ToolCall( + id="call_1", + type="function", + function=FunctionCall(name="sqrt", arguments="{}"), + ) + resp = CompletionResponse(tool_calls=[tool_call]) + assert resp.has_tool_calls is True + + def test_has_tool_calls_false(self): + """Test has_tool_calls property when False.""" + resp = CompletionResponse(response="Hello") + assert resp.has_tool_calls is False + + def test_has_content_true(self): + """Test has_content property when True.""" + resp = CompletionResponse(response="Hello") + assert resp.has_content is True + + def test_has_content_false(self): + """Test has_content property when False.""" + resp = CompletionResponse(response="") + assert resp.has_content is False + + +class TestConvertMessages: + """Tests for message conversion functions.""" + + def test_convert_messages_to_models_from_dicts(self): + """Test converting dict messages to models.""" + messages = [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi there!"}, + ] + models = convert_messages_to_models(messages) + assert len(models) == 2 + assert models[0].role == MessageRole.USER + assert models[0].content == "Hello" + assert models[1].role == MessageRole.ASSISTANT + + def test_convert_messages_to_models_passthrough(self): + """Test that Message instances pass through unchanged.""" + msg = Message(role=MessageRole.USER, content="Hello") + models = convert_messages_to_models([msg]) + assert models[0] is msg # Same instance + + def test_convert_messages_to_dicts(self): + """Test converting Message models to dicts.""" + messages = [ + Message(role=MessageRole.USER, content="Hello"), + Message(role=MessageRole.ASSISTANT, content="Hi!"), + ] + dicts = convert_messages_to_dicts(messages) + assert len(dicts) == 2 + assert dicts[0]["role"] == "user" + assert 
dicts[0]["content"] == "Hello" diff --git a/tests/chat/test_tool_processor.py b/tests/chat/test_tool_processor.py index c5d70854..ed4004f0 100644 --- a/tests/chat/test_tool_processor.py +++ b/tests/chat/test_tool_processor.py @@ -1,10 +1,38 @@ # tests/mcp_cli/chat/test_tool_processor.py import json import pytest +from datetime import datetime, UTC + +from chuk_tool_processor import ToolResult as CTPToolResult +import chuk_ai_session_manager.guards.manager as _guard_mgr +from chuk_ai_session_manager.guards import ( + get_tool_state, + reset_tool_state, + RuntimeLimits, + ToolStateManager, +) from mcp_cli.chat.tool_processor import ToolProcessor +from mcp_cli.chat.response_models import ToolCall, FunctionCall from mcp_cli.tools.models import ToolCallResult + +@pytest.fixture(autouse=True) +def _fresh_tool_state(): + """Reset the global tool state singleton before each test with permissive limits.""" + reset_tool_state() + _guard_mgr._tool_state = ToolStateManager( + limits=RuntimeLimits( + per_tool_cap=100, + tool_budget_total=100, + discovery_budget=50, + execution_budget=50, + ) + ) + yield + reset_tool_state() + + # --------------------------- # Dummy classes for testing # --------------------------- @@ -18,14 +46,18 @@ def __init__(self): def print_tool_call(self, tool_name, raw_arguments): self.printed_calls.append((tool_name, raw_arguments)) - def finish_tool_execution(self, result=None, success=True): - # Add method that tool processor expects + async def finish_tool_execution(self, result=None, success=True): + # Add async method that tool processor expects pass def do_confirm_tool_execution(self, tool_name, arguments): # Mock confirmation - always return True for tests return True + async def start_tool_execution(self, tool_name, arguments): + # Mock start tool execution - no-op for tests + pass + class DummyStreamManager: def __init__(self, return_result=None, raise_exception=False): @@ -47,7 +79,7 @@ async def call_tool(self, tool_name, arguments): class 
DummyToolManager: - """Mock tool manager with execute_tool method that returns ToolCallResult.""" + """Mock tool manager with execute_tool and stream_execute_tools methods.""" def __init__(self, return_result=None, raise_exception=False): self.return_result = return_result or { @@ -58,7 +90,7 @@ def __init__(self, return_result=None, raise_exception=False): self.executed_tool = None self.executed_args = None - async def execute_tool(self, tool_name, arguments): + async def execute_tool(self, tool_name, arguments, namespace=None, timeout=None): self.executed_tool = tool_name self.executed_args = arguments if self.raise_exception: @@ -80,6 +112,58 @@ async def execute_tool(self, tool_name, arguments): error=None, ) + async def stream_execute_tools( + self, calls, timeout=None, on_tool_start=None, max_concurrency=4 + ): + """Yield CTPToolResult for each call.""" + import platform + import os + + for call in calls: + self.executed_tool = call.tool + self.executed_args = call.arguments + + # Invoke start callback if provided + if on_tool_start: + await on_tool_start(call) + + if self.raise_exception: + now = datetime.now(UTC) + yield CTPToolResult( + id=call.id, + tool=call.tool, + result=None, + error="Simulated execute_tool exception", + start_time=now, + end_time=now, + machine=platform.node(), + pid=os.getpid(), + ) + elif self.return_result.get("isError"): + now = datetime.now(UTC) + yield CTPToolResult( + id=call.id, + tool=call.tool, + result=None, + error=self.return_result.get("error", "Simulated error"), + start_time=now, + end_time=now, + machine=platform.node(), + pid=os.getpid(), + ) + else: + now = datetime.now(UTC) + yield CTPToolResult( + id=call.id, + tool=call.tool, + result=self.return_result.get("content"), + error=None, + start_time=now, + end_time=now, + machine=platform.node(), + pid=os.getpid(), + ) + class DummyContext: """A dummy context object with conversation_history and managers.""" @@ -89,6 +173,10 @@ def __init__(self, 
stream_manager=None, tool_manager=None): self.stream_manager = stream_manager self.tool_manager = tool_manager + def inject_tool_message(self, message): + """Add a message to conversation history (matches ChatContext API).""" + self.conversation_history.append(message) + # --------------------------- # Tests for ToolProcessor @@ -124,10 +212,12 @@ async def test_process_tool_calls_successful_tool(): ui_manager = DummyUIManager() processor = ToolProcessor(context, ui_manager) - tool_call = { - "function": {"name": "echo", "arguments": '{"msg": "Hello"}'}, - "id": "call_echo", - } + # Create a proper ToolCall Pydantic model instead of a dict + tool_call = ToolCall( + id="call_echo", + type="function", + function=FunctionCall(name="echo", arguments='{"msg": "Hello"}'), + ) await processor.process_tool_calls([tool_call]) # Verify that the UI manager printed the tool call. @@ -143,11 +233,13 @@ async def test_process_tool_calls_successful_tool(): # Verify the tool call record contains the correct id. assert call_record.tool_calls is not None - assert any(item.get("id") == "call_echo" for item in call_record.tool_calls) + # tool_calls is now a list of ToolCall Pydantic models, not dicts + assert any(item.id == "call_echo" for item in call_record.tool_calls) # Verify the response record. 
assert response_record.role.value == "tool" - assert response_record.content == "Tool executed successfully" + # Content now includes value binding info ($vN = value) + assert "Tool executed successfully" in response_record.content @pytest.mark.asyncio @@ -160,6 +252,10 @@ async def test_process_tool_calls_with_argument_parsing(): ui_manager = DummyUIManager() processor = ToolProcessor(context, ui_manager) + # Register 123 as a user-provided literal so it passes ungrounded check + tool_state = get_tool_state() + tool_state.register_user_literals("Test with value 123") + tool_call = { "function": {"name": "parse_tool", "arguments": '{"num": 123}'}, "id": "call_parse", @@ -170,10 +266,11 @@ async def test_process_tool_calls_with_argument_parsing(): assert isinstance(tool_manager.executed_args, dict) assert tool_manager.executed_args.get("num") == 123 - # Check that the response record content is formatted as a JSON string. + # Check that the response record content contains the formatted result. + # Note: Content now includes value binding info ($vN = value) appended response_record = context.conversation_history[1] expected_formatted = json.dumps(result_dict["content"], indent=2) - assert response_record.content == expected_formatted + assert expected_formatted in response_record.content @pytest.mark.asyncio @@ -203,8 +300,8 @@ async def test_process_tool_calls_tool_call_error(): @pytest.mark.asyncio -async def test_process_tool_calls_no_stream_manager(capfd): - # Test when no stream manager is available. +async def test_process_tool_calls_no_tool_manager(): + # Test when no tool manager is available. 
context = DummyContext(stream_manager=None, tool_manager=None) ui_manager = DummyUIManager() processor = ToolProcessor(context, ui_manager) @@ -215,16 +312,9 @@ async def test_process_tool_calls_no_stream_manager(capfd): "id": "test1", } - # Pass as a list to process_tool_calls - await processor.process_tool_calls([tool_call]) - - # The actual error message is "No tool manager available for tool execution" - error_msgs = [ - entry.content - for entry in context.conversation_history - if entry.content is not None - ] - assert any("No tool manager available" in msg for msg in error_msgs) + # Pass as a list to process_tool_calls - should raise RuntimeError + with pytest.raises(RuntimeError, match="No tool manager available"): + await processor.process_tool_calls([tool_call]) @pytest.mark.asyncio diff --git a/tests/cli/test_run_command.py b/tests/cli/test_run_command.py index aa5c8eff..c7e0a112 100644 --- a/tests/cli/test_run_command.py +++ b/tests/cli/test_run_command.py @@ -108,19 +108,29 @@ async def failing_async_command(): # --------------------------------------------------------------------------- # -# Monkey-patch **ToolManager** in the correct module for every test +# Monkey-patch **ToolManager** using the factory pattern for every test # --------------------------------------------------------------------------- # @pytest.fixture(autouse=True) def patch_tool_manager(monkeypatch): - # default -> success manager; individual tests override if needed + """Use the new factory pattern for ToolManager injection.""" + from mcp_cli.run_command import set_tool_manager_factory + + # Set the factory to use DummyToolManager + set_tool_manager_factory(DummyToolManager) + + # Also patch the direct import for backward compatibility monkeypatch.setattr( "mcp_cli.tools.manager.ToolManager", DummyToolManager, raising=True ) - # clean collected list between tests + + # Clean collected list between tests _ALL_TM.clear() yield _ALL_TM.clear() + # Reset the factory after test + 
set_tool_manager_factory(None) + # --------------------------------------------------------------------------- # # run_command (async) tests @@ -154,6 +164,9 @@ async def test_run_command_sync_callable(): @pytest.mark.asyncio async def test_run_command_cleanup_on_exception(monkeypatch): + from mcp_cli.run_command import set_tool_manager_factory + + set_tool_manager_factory(DummyToolManager) monkeypatch.setattr( "mcp_cli.tools.manager.ToolManager", DummyToolManager, raising=True ) @@ -169,6 +182,10 @@ async def test_run_command_cleanup_on_exception(monkeypatch): @pytest.mark.asyncio async def test_run_command_init_failure_raises(monkeypatch): + from mcp_cli.run_command import set_tool_manager_factory + + # Use the factory to inject the failing manager + set_tool_manager_factory(DummyInitFailToolManager) monkeypatch.setattr( "mcp_cli.tools.manager.ToolManager", DummyInitFailToolManager, raising=True ) @@ -179,7 +196,7 @@ async def test_run_command_init_failure_raises(monkeypatch): servers=["S1"], extra_params={}, ) - assert _ALL_TM[0].closed # close even when init “fails” + assert _ALL_TM[0].closed # close even when init "fails" # --------------------------------------------------------------------------- # diff --git a/tests/commands/actions/test_clear_action.py b/tests/commands/actions/test_clear_action.py deleted file mode 100644 index 097a1620..00000000 --- a/tests/commands/actions/test_clear_action.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Tests for clear action.""" - -from unittest.mock import patch -from mcp_cli.commands.actions.clear import clear_action - - -def test_clear_action_basic(): - """Test basic clear action without verbose.""" - with patch("mcp_cli.commands.actions.clear.clear_screen") as mock_clear: - clear_action() - mock_clear.assert_called_once() - - -def test_clear_action_verbose(): - """Test clear action with verbose output.""" - with ( - patch("mcp_cli.commands.actions.clear.clear_screen") as mock_clear, - 
patch("mcp_cli.commands.actions.clear.output") as mock_output, - ): - clear_action(verbose=True) - mock_clear.assert_called_once() - mock_output.hint.assert_called_once_with("Screen cleared.") - - -def test_clear_action_verbose_false(): - """Test clear action with verbose=False doesn't output.""" - with ( - patch("mcp_cli.commands.actions.clear.clear_screen") as mock_clear, - patch("mcp_cli.commands.actions.clear.output") as mock_output, - ): - clear_action(verbose=False) - mock_clear.assert_called_once() - mock_output.hint.assert_not_called() diff --git a/tests/commands/actions/test_cmd_action.py b/tests/commands/actions/test_cmd_action.py deleted file mode 100644 index 33fcedfc..00000000 --- a/tests/commands/actions/test_cmd_action.py +++ /dev/null @@ -1,486 +0,0 @@ -"""Tests for cmd action module.""" - -import pytest -from unittest.mock import patch, MagicMock, AsyncMock - -from mcp_cli.commands.actions.cmd import ( - cmd_action_async, - _execute_tool_direct, - _handle_tool_calls, -) - - -class TestCmdActionAsync: - """Tests for cmd_action_async main function.""" - - @pytest.mark.asyncio - async def test_no_context(self): - """Test when context is not initialized.""" - with patch("mcp_cli.context.get_context", return_value=None): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - await cmd_action_async() - mock_output.error.assert_called_once() - assert "Context not initialized" in str(mock_output.error.call_args) - - @pytest.mark.asyncio - async def test_no_tool_manager(self): - """Test when context exists but no tool manager.""" - mock_context = MagicMock() - mock_context.tool_manager = None - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - await cmd_action_async() - mock_output.error.assert_called() - - @pytest.mark.asyncio - async def test_tool_mode(self): - """Test executing in tool mode.""" - mock_context = MagicMock() - 
mock_context.tool_manager = MagicMock() - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch( - "mcp_cli.commands.actions.cmd._execute_tool_direct" - ) as mock_exec: - await cmd_action_async(tool="test_tool", tool_args='{"arg": "value"}') - mock_exec.assert_called_once() - - @pytest.mark.asyncio - async def test_prompt_mode(self): - """Test executing in prompt mode.""" - mock_context = MagicMock() - mock_context.tool_manager = MagicMock() - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch( - "mcp_cli.commands.actions.cmd._execute_prompt_mode" - ) as mock_exec: - await cmd_action_async(prompt="Test prompt") - mock_exec.assert_called_once() - - @pytest.mark.asyncio - async def test_input_file_mode(self): - """Test executing with input file.""" - mock_context = MagicMock() - mock_context.tool_manager = MagicMock() - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch( - "mcp_cli.commands.actions.cmd._execute_prompt_mode" - ) as mock_exec: - await cmd_action_async(input_file="input.txt") - mock_exec.assert_called_once() - - @pytest.mark.asyncio - async def test_no_mode_specified(self): - """Test when no operation mode is specified.""" - mock_context = MagicMock() - mock_context.tool_manager = MagicMock() - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - await cmd_action_async() - mock_output.error.assert_called() - assert "No operation specified" in str(mock_output.error.call_args) - - @pytest.mark.asyncio - async def test_exception_handling(self): - """Test exception handling in cmd_action_async.""" - mock_context = MagicMock() - mock_context.tool_manager = MagicMock() - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch( - "mcp_cli.commands.actions.cmd._execute_tool_direct", - side_effect=RuntimeError("Test error"), - ): - with 
patch("mcp_cli.commands.actions.cmd.output") as mock_output: - with pytest.raises(RuntimeError, match="Test error"): - await cmd_action_async(tool="test_tool") - mock_output.error.assert_called() - - -class TestExecuteToolDirect: - """Tests for _execute_tool_direct function.""" - - @pytest.mark.asyncio - async def test_no_tool_manager(self): - """Test when tool manager is not available.""" - mock_context = MagicMock() - mock_context.tool_manager = None - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - await _execute_tool_direct("test_tool", None, None, False) - mock_output.error.assert_called() - - @pytest.mark.asyncio - async def test_invalid_json_args(self): - """Test with invalid JSON in tool arguments.""" - mock_context = MagicMock() - mock_context.tool_manager = MagicMock() - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - await _execute_tool_direct("test_tool", "invalid json", None, False) - mock_output.error.assert_called() - assert "Invalid JSON" in str(mock_output.error.call_args) - - @pytest.mark.asyncio - async def test_tool_execution_success_raw(self): - """Test successful tool execution in raw mode.""" - mock_context = MagicMock() - mock_tool_manager = AsyncMock() - mock_context.tool_manager = mock_tool_manager - - mock_result = MagicMock() - mock_result.success = True - mock_result.error = None - mock_result.result = {"data": "test"} - mock_tool_manager.execute_tool = AsyncMock(return_value=mock_result) - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - with patch("builtins.print") as mock_print: - await _execute_tool_direct( - "test_tool", '{"arg": "value"}', None, True - ) - mock_print.assert_called_once() - - @pytest.mark.asyncio - async def test_tool_execution_success_formatted(self): - 
"""Test successful tool execution in formatted mode.""" - mock_context = MagicMock() - mock_tool_manager = AsyncMock() - mock_context.tool_manager = mock_tool_manager - - mock_result = MagicMock() - mock_result.success = True - mock_result.error = None - mock_result.result = {"data": "test"} - mock_tool_manager.execute_tool = AsyncMock(return_value=mock_result) - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - with patch("builtins.print") as mock_print: - await _execute_tool_direct("test_tool", None, None, False) - mock_print.assert_called_once() - - @pytest.mark.asyncio - async def test_tool_execution_with_string_result(self): - """Test tool execution when result is a string.""" - mock_context = MagicMock() - mock_tool_manager = AsyncMock() - mock_context.tool_manager = mock_tool_manager - - mock_result = MagicMock() - mock_result.success = True - mock_result.error = None - mock_result.result = "string result" - mock_tool_manager.execute_tool = AsyncMock(return_value=mock_result) - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - with patch("builtins.print") as mock_print: - await _execute_tool_direct("test_tool", None, None, True) - mock_print.assert_called_with("string result") - - @pytest.mark.asyncio - async def test_tool_execution_to_file(self, tmp_path): - """Test tool execution with output to file.""" - mock_context = MagicMock() - mock_tool_manager = AsyncMock() - mock_context.tool_manager = mock_tool_manager - - mock_result = MagicMock() - mock_result.success = True - mock_result.error = None - mock_result.result = {"data": "test"} - mock_tool_manager.execute_tool = AsyncMock(return_value=mock_result) - - output_file = tmp_path / "output.json" - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - await 
_execute_tool_direct("test_tool", None, str(output_file), False) - assert output_file.exists() - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_tool_execution_failure(self): - """Test when tool execution fails.""" - mock_context = MagicMock() - mock_tool_manager = AsyncMock() - mock_context.tool_manager = mock_tool_manager - - mock_result = MagicMock() - mock_result.success = False - mock_result.error = "Tool failed" - mock_tool_manager.execute_tool = AsyncMock(return_value=mock_result) - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - await _execute_tool_direct("test_tool", None, None, False) - mock_output.error.assert_called() - - @pytest.mark.asyncio - async def test_tool_execution_exception(self): - """Test exception handling in tool execution.""" - mock_context = MagicMock() - mock_tool_manager = AsyncMock() - mock_context.tool_manager = mock_tool_manager - mock_tool_manager.execute_tool = AsyncMock( - side_effect=RuntimeError("Test error") - ) - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - with pytest.raises(RuntimeError): - await _execute_tool_direct("test_tool", None, None, False) - mock_output.error.assert_called() - - -class TestHandleToolCalls: - """Tests for _handle_tool_calls function.""" - - @pytest.mark.asyncio - async def test_no_tool_manager(self): - """Test when tool manager is not available.""" - mock_context = MagicMock() - mock_context.tool_manager = None - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - result = await _handle_tool_calls(None, [], [], "response", 10, False) - assert result == "response" - - @pytest.mark.asyncio - async def test_tool_call_dict_format(self): - """Test handling tool calls in dict format.""" - mock_context = MagicMock() 
- mock_tool_manager = AsyncMock() - mock_context.tool_manager = mock_tool_manager - mock_context.model = "gpt-4" - - mock_result = MagicMock() - mock_result.success = True - mock_result.result = "tool result" - mock_tool_manager.execute_tool = AsyncMock(return_value=mock_result) - mock_tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "final response", "tool_calls": []} - ) - - tool_calls = [ - { - "function": {"name": "test_tool", "arguments": '{"arg": "value"}'}, - "id": "call_1", - } - ] - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - result = await _handle_tool_calls( - mock_client, [], tool_calls, "response", 10, False - ) - assert result == "final response" - - @pytest.mark.asyncio - async def test_tool_call_object_format(self): - """Test handling tool calls in object format.""" - mock_context = MagicMock() - mock_tool_manager = AsyncMock() - mock_context.tool_manager = mock_tool_manager - mock_context.model = "gpt-4" - - mock_result = MagicMock() - mock_result.success = True - mock_result.result = "tool result" - mock_tool_manager.execute_tool = AsyncMock(return_value=mock_result) - mock_tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "final response", "tool_calls": []} - ) - - # Mock object format - mock_tool_call = MagicMock() - mock_tool_call.function.name = "test_tool" - mock_tool_call.function.arguments = '{"arg": "value"}' - mock_tool_call.id = "call_1" - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - result = await _handle_tool_calls( - mock_client, [], [mock_tool_call], "response", 10, False - ) - assert result == "final response" - - @pytest.mark.asyncio - async def 
test_tool_call_with_dict_args(self): - """Test tool call with arguments already as dict.""" - mock_context = MagicMock() - mock_tool_manager = AsyncMock() - mock_context.tool_manager = mock_tool_manager - mock_context.model = "gpt-4" - - mock_result = MagicMock() - mock_result.success = True - mock_result.result = "tool result" - mock_tool_manager.execute_tool = AsyncMock(return_value=mock_result) - mock_tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "final response", "tool_calls": []} - ) - - tool_calls = [ - { - "function": {"name": "test_tool", "arguments": {"arg": "value"}}, - "id": "call_1", - } - ] - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - result = await _handle_tool_calls( - mock_client, [], tool_calls, "response", 10, False - ) - assert result == "final response" - - @pytest.mark.asyncio - async def test_tool_call_failure(self): - """Test when tool execution fails.""" - mock_context = MagicMock() - mock_tool_manager = AsyncMock() - mock_context.tool_manager = mock_tool_manager - mock_context.model = "gpt-4" - - mock_result = MagicMock() - mock_result.success = False - mock_result.error = "Tool failed" - mock_result.result = None - mock_tool_manager.execute_tool = AsyncMock(return_value=mock_result) - mock_tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "final response", "tool_calls": []} - ) - - tool_calls = [ - {"function": {"name": "test_tool", "arguments": "{}"}, "id": "call_1"} - ] - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - result = await _handle_tool_calls( - mock_client, [], tool_calls, "response", 10, False - ) - assert result == "final response" - - 
@pytest.mark.asyncio - async def test_tool_call_exception(self): - """Test when tool execution raises exception.""" - mock_context = MagicMock() - mock_tool_manager = AsyncMock() - mock_context.tool_manager = mock_tool_manager - mock_context.model = "gpt-4" - - mock_tool_manager.execute_tool = AsyncMock( - side_effect=RuntimeError("Tool error") - ) - mock_tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "final response", "tool_calls": []} - ) - - tool_calls = [ - {"function": {"name": "test_tool", "arguments": "{}"}, "id": "call_1"} - ] - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - result = await _handle_tool_calls( - mock_client, [], tool_calls, "response", 10, False - ) - assert result == "final response" - mock_output.error.assert_called() - - @pytest.mark.asyncio - async def test_max_turns_reached(self): - """Test when max turns is reached.""" - mock_context = MagicMock() - mock_tool_manager = AsyncMock() - mock_context.tool_manager = mock_tool_manager - mock_context.model = "gpt-4" - - mock_result = MagicMock() - mock_result.success = True - mock_result.result = "tool result" - mock_tool_manager.execute_tool = AsyncMock(return_value=mock_result) - mock_tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - # Always return more tool calls to hit max turns - mock_client.create_completion = AsyncMock( - return_value={ - "response": "continuing", - "tool_calls": [ - { - "function": {"name": "test_tool", "arguments": "{}"}, - "id": "call_1", - } - ], - } - ) - - tool_calls = [ - {"function": {"name": "test_tool", "arguments": "{}"}, "id": "call_1"} - ] - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - await _handle_tool_calls( - 
mock_client, [], tool_calls, "response", 2, False - ) - mock_output.warning.assert_called() - assert "Max turns" in str(mock_output.warning.call_args) - - @pytest.mark.asyncio - async def test_raw_mode(self): - """Test in raw mode (no info output).""" - mock_context = MagicMock() - mock_tool_manager = AsyncMock() - mock_context.tool_manager = mock_tool_manager - mock_context.model = "gpt-4" - - mock_result = MagicMock() - mock_result.success = True - mock_result.result = "tool result" - mock_tool_manager.execute_tool = AsyncMock(return_value=mock_result) - mock_tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "final response", "tool_calls": []} - ) - - tool_calls = [ - {"function": {"name": "test_tool", "arguments": "{}"}, "id": "call_1"} - ] - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - await _handle_tool_calls( - mock_client, [], tool_calls, "response", 10, True - ) - # In raw mode, info should not be called - assert mock_output.info.call_count == 0 diff --git a/tests/commands/actions/test_cmd_extended.py b/tests/commands/actions/test_cmd_extended.py deleted file mode 100644 index 631a23a5..00000000 --- a/tests/commands/actions/test_cmd_extended.py +++ /dev/null @@ -1,770 +0,0 @@ -"""Extended tests for cmd action to reach 90%+ coverage.""" - -import pytest -from unittest.mock import patch, MagicMock, AsyncMock - -from mcp_cli.commands.actions.cmd import ( - _execute_prompt_mode, - _handle_tool_calls, -) - - -class TestExecutePromptMode: - """Comprehensive tests for _execute_prompt_mode function.""" - - @pytest.mark.asyncio - async def test_read_input_from_stdin(self): - """Test reading input from stdin (line 171-173).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = 
MagicMock() - mock_context.tool_manager = AsyncMock() - mock_context.tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "test response", "tool_calls": []} - ) - mock_context.model_manager.get_client.return_value = mock_client - - stdin_content = "This is stdin input" - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("sys.stdin.read", return_value=stdin_content): - with patch("mcp_cli.commands.actions.cmd.output"): - with patch("builtins.print") as mock_print: - await _execute_prompt_mode( - input_file="-", - output_file=None, - prompt=None, - system_prompt=None, - raw=False, - single_turn=False, - max_turns=30, - ) - - # Verify LLM was called with stdin input - mock_client.create_completion.assert_called_once() - call_args = mock_client.create_completion.call_args - messages = call_args[1]["messages"] - assert any(stdin_content in msg["content"] for msg in messages) - mock_print.assert_called_with("test response") - - @pytest.mark.asyncio - async def test_read_input_from_file(self, tmp_path): - """Test reading input from file (line 174-175).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = MagicMock() - mock_context.tool_manager = AsyncMock() - mock_context.tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "test response", "tool_calls": []} - ) - mock_context.model_manager.get_client.return_value = mock_client - - # Create a test input file - input_file = tmp_path / "input.txt" - input_file.write_text("File content here") - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - with patch("builtins.print"): - await _execute_prompt_mode( - 
input_file=str(input_file), - output_file=None, - prompt=None, - system_prompt=None, - raw=False, - single_turn=False, - max_turns=30, - ) - - # Verify LLM was called with file content - call_args = mock_client.create_completion.call_args - messages = call_args[1]["messages"] - assert any( - "File content here" in msg["content"] for msg in messages - ) - - @pytest.mark.asyncio - async def test_build_prompt_with_both_prompt_and_input(self): - """Test building full prompt with both prompt and input_text (line 178-179).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = MagicMock() - mock_context.tool_manager = AsyncMock() - mock_context.tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "test response", "tool_calls": []} - ) - mock_context.model_manager.get_client.return_value = mock_client - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("sys.stdin.read", return_value="Input text"): - with patch("mcp_cli.commands.actions.cmd.output"): - with patch("builtins.print"): - await _execute_prompt_mode( - input_file="-", - output_file=None, - prompt="Analyze this", - system_prompt=None, - raw=False, - single_turn=False, - max_turns=30, - ) - - # Verify combined prompt - call_args = mock_client.create_completion.call_args - messages = call_args[1]["messages"] - content = messages[0]["content"] - assert "Analyze this" in content - assert "Input text" in content - - @pytest.mark.asyncio - async def test_build_prompt_with_only_prompt(self): - """Test building full prompt with only prompt (line 180-181).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = MagicMock() - mock_context.tool_manager = AsyncMock() - mock_context.tool_manager.get_tools_for_llm = 
AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "test response", "tool_calls": []} - ) - mock_context.model_manager.get_client.return_value = mock_client - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - with patch("builtins.print"): - await _execute_prompt_mode( - input_file=None, - output_file=None, - prompt="Just a prompt", - system_prompt=None, - raw=False, - single_turn=False, - max_turns=30, - ) - - # Verify only prompt is used - call_args = mock_client.create_completion.call_args - messages = call_args[1]["messages"] - assert messages[0]["content"] == "Just a prompt" - - @pytest.mark.asyncio - async def test_build_prompt_with_only_input(self, tmp_path): - """Test building full prompt with only input_text (line 182-183).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = MagicMock() - mock_context.tool_manager = AsyncMock() - mock_context.tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "test response", "tool_calls": []} - ) - mock_context.model_manager.get_client.return_value = mock_client - - input_file = tmp_path / "input.txt" - input_file.write_text("Only input text") - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - with patch("builtins.print"): - await _execute_prompt_mode( - input_file=str(input_file), - output_file=None, - prompt=None, - system_prompt=None, - raw=False, - single_turn=False, - max_turns=30, - ) - - # Verify only input is used - call_args = mock_client.create_completion.call_args - messages = call_args[1]["messages"] - assert messages[0]["content"] == "Only input text" - - @pytest.mark.asyncio - async def 
test_error_no_prompt_or_input(self): - """Test error when no prompt or input provided (line 184-186).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - await _execute_prompt_mode( - input_file=None, - output_file=None, - prompt=None, - system_prompt=None, - raw=False, - single_turn=False, - max_turns=30, - ) - - mock_output.error.assert_called_with("No prompt or input provided") - - @pytest.mark.asyncio - async def test_model_manager_from_context(self): - """Test using model_manager from context (line 191-202).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = MagicMock() - mock_context.tool_manager = AsyncMock() - mock_context.tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "test response", "tool_calls": []} - ) - mock_context.model_manager.get_client.return_value = mock_client - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - with patch("builtins.print"): - await _execute_prompt_mode( - input_file=None, - output_file=None, - prompt="Test", - system_prompt=None, - raw=False, - single_turn=False, - max_turns=30, - ) - - # Verify get_client was called on context's model_manager - mock_context.model_manager.get_client.assert_called_once_with( - provider="anthropic", model="claude-3" - ) - - @pytest.mark.asyncio - async def test_fallback_to_new_model_manager(self): - """Test fallback to creating new ModelManager (line 192-198).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = None # No model manager in context - 
mock_context.tool_manager = AsyncMock() - mock_context.tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "test response", "tool_calls": []} - ) - - mock_model_manager = MagicMock() - mock_model_manager.get_client.return_value = mock_client - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch( - "mcp_cli.model_management.ModelManager", - return_value=mock_model_manager, - ): - with patch("mcp_cli.commands.actions.cmd.output"): - with patch("builtins.print"): - await _execute_prompt_mode( - input_file=None, - output_file=None, - prompt="Test", - system_prompt=None, - raw=False, - single_turn=False, - max_turns=30, - ) - - # Verify new ModelManager was created and switch_model called - mock_model_manager.switch_model.assert_called_once_with( - "anthropic", "claude-3" - ) - mock_model_manager.get_client.assert_called_once() - - @pytest.mark.asyncio - async def test_client_creation_failure(self): - """Test when get_client returns None (line 204-208).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = MagicMock() - mock_context.tool_manager = AsyncMock() - mock_context.tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - mock_context.model_manager.get_client.return_value = None - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - await _execute_prompt_mode( - input_file=None, - output_file=None, - prompt="Test", - system_prompt=None, - raw=False, - single_turn=False, - max_turns=30, - ) - - mock_output.error.assert_called() - assert "Failed to get LLM client" in str(mock_output.error.call_args) - - @pytest.mark.asyncio - async def test_client_initialization_exception(self): - """Test exception during client initialization (line 209-211).""" - 
mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = MagicMock() - mock_context.model_manager.get_client.side_effect = RuntimeError( - "Client init failed" - ) - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - await _execute_prompt_mode( - input_file=None, - output_file=None, - prompt="Test", - system_prompt=None, - raw=False, - single_turn=False, - max_turns=30, - ) - - mock_output.error.assert_called() - assert "Failed to initialize LLM client" in str( - mock_output.error.call_args - ) - - @pytest.mark.asyncio - async def test_build_messages_with_system_prompt(self): - """Test building messages with system prompt (line 215-217).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = MagicMock() - mock_context.tool_manager = AsyncMock() - mock_context.tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "test response", "tool_calls": []} - ) - mock_context.model_manager.get_client.return_value = mock_client - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - with patch("builtins.print"): - await _execute_prompt_mode( - input_file=None, - output_file=None, - prompt="Test", - system_prompt="You are a helpful assistant", - raw=False, - single_turn=False, - max_turns=30, - ) - - # Verify system message is first - call_args = mock_client.create_completion.call_args - messages = call_args[1]["messages"] - assert len(messages) == 2 - assert messages[0]["role"] == "system" - assert messages[0]["content"] == "You are a helpful assistant" - assert messages[1]["role"] == "user" - - @pytest.mark.asyncio - async def 
test_get_tools_when_available_and_not_single_turn(self): - """Test getting tools when tool_manager exists and not single_turn (line 226-227).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = MagicMock() - mock_context.tool_manager = AsyncMock() - - mock_tools = [{"name": "tool1"}, {"name": "tool2"}] - mock_context.tool_manager.get_tools_for_llm = AsyncMock(return_value=mock_tools) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "test response", "tool_calls": []} - ) - mock_context.model_manager.get_client.return_value = mock_client - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - with patch("builtins.print"): - await _execute_prompt_mode( - input_file=None, - output_file=None, - prompt="Test", - system_prompt=None, - raw=False, - single_turn=False, - max_turns=30, - ) - - # Verify tools were passed to LLM - call_args = mock_client.create_completion.call_args - assert call_args[1]["tools"] == mock_tools - - @pytest.mark.asyncio - async def test_no_tools_when_single_turn(self): - """Test that tools are None when single_turn=True (line 226).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = MagicMock() - mock_context.tool_manager = AsyncMock() - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "test response", "tool_calls": []} - ) - mock_context.model_manager.get_client.return_value = mock_client - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - with patch("builtins.print"): - await _execute_prompt_mode( - input_file=None, - output_file=None, - prompt="Test", - system_prompt=None, - raw=False, - single_turn=True, - max_turns=30, - ) - - # 
Verify tools were None - call_args = mock_client.create_completion.call_args - assert call_args[1]["tools"] is None - # Verify get_tools_for_llm was not called - mock_context.tool_manager.get_tools_for_llm.assert_not_called() - - @pytest.mark.asyncio - async def test_handle_tool_calls_when_present(self): - """Test handling tool calls when present and not single_turn (line 242-251).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = MagicMock() - mock_context.tool_manager = AsyncMock() - - mock_client = AsyncMock() - tool_calls = [ - {"function": {"name": "test_tool", "arguments": "{}"}, "id": "call_1"} - ] - mock_client.create_completion = AsyncMock( - return_value={"response": "initial response", "tool_calls": tool_calls} - ) - mock_context.model_manager.get_client.return_value = mock_client - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - with patch( - "mcp_cli.commands.actions.cmd._handle_tool_calls", - new_callable=AsyncMock, - ) as mock_handle: - mock_handle.return_value = "final response" - - with patch("builtins.print") as mock_print: - await _execute_prompt_mode( - input_file=None, - output_file=None, - prompt="Test", - system_prompt=None, - raw=False, - single_turn=False, - max_turns=30, - ) - - # Verify _handle_tool_calls was called - mock_handle.assert_called_once() - # Verify final response was printed - mock_print.assert_called_with("final response") - - @pytest.mark.asyncio - async def test_write_output_to_file(self, tmp_path): - """Test writing output to file (line 254-257).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = MagicMock() - mock_context.tool_manager = AsyncMock() - mock_context.tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - 
mock_client.create_completion = AsyncMock( - return_value={"response": "test response", "tool_calls": []} - ) - mock_context.model_manager.get_client.return_value = mock_client - - output_file = tmp_path / "output.txt" - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - await _execute_prompt_mode( - input_file=None, - output_file=str(output_file), - prompt="Test", - system_prompt=None, - raw=False, - single_turn=False, - max_turns=30, - ) - - # Verify file was written - assert output_file.exists() - assert output_file.read_text() == "test response" - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_write_output_to_stdout(self): - """Test writing output to stdout (line 259-260).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = MagicMock() - mock_context.tool_manager = AsyncMock() - mock_context.tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "stdout response", "tool_calls": []} - ) - mock_context.model_manager.get_client.return_value = mock_client - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - with patch("builtins.print") as mock_print: - await _execute_prompt_mode( - input_file=None, - output_file=None, - prompt="Test", - system_prompt=None, - raw=False, - single_turn=False, - max_turns=30, - ) - - mock_print.assert_called_with("stdout response") - - @pytest.mark.asyncio - async def test_raw_mode_no_info_output(self): - """Test raw mode doesn't output info messages (line 221).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = MagicMock() - mock_context.tool_manager = AsyncMock() - 
mock_context.tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - return_value={"response": "test response", "tool_calls": []} - ) - mock_context.model_manager.get_client.return_value = mock_client - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - with patch("builtins.print"): - await _execute_prompt_mode( - input_file=None, - output_file=None, - prompt="Test", - system_prompt=None, - raw=True, - single_turn=False, - max_turns=30, - ) - - # In raw mode, info should not be called - assert mock_output.info.call_count == 0 - - @pytest.mark.asyncio - async def test_llm_execution_exception(self): - """Test exception handling in LLM execution (line 262-264).""" - mock_context = MagicMock() - mock_context.provider = "anthropic" - mock_context.model = "claude-3" - mock_context.model_manager = MagicMock() - mock_context.tool_manager = AsyncMock() - mock_context.tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - mock_client.create_completion = AsyncMock( - side_effect=RuntimeError("LLM failed") - ) - mock_context.model_manager.get_client.return_value = mock_client - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - with pytest.raises(RuntimeError, match="LLM failed"): - await _execute_prompt_mode( - input_file=None, - output_file=None, - prompt="Test", - system_prompt=None, - raw=False, - single_turn=False, - max_turns=30, - ) - - mock_output.error.assert_called() - assert "LLM execution failed" in str(mock_output.error.call_args) - - -class TestHandleToolCallsExtended: - """Extended tests for _handle_tool_calls to cover missing lines.""" - - @pytest.mark.asyncio - async def test_object_format_tool_calls_in_loop(self): - """Test object format tool calls in the 
continuation loop (lines 387-389).""" - mock_context = MagicMock() - mock_tool_manager = AsyncMock() - mock_context.tool_manager = mock_tool_manager - mock_context.model = "gpt-4" - - mock_result = MagicMock() - mock_result.success = True - mock_result.result = "tool result" - mock_tool_manager.execute_tool = AsyncMock(return_value=mock_result) - mock_tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - - # First call returns more tool calls (object format), second call returns no tool calls - mock_tool_call_obj = MagicMock() - mock_tool_call_obj.function.name = "test_tool" - mock_tool_call_obj.function.arguments = '{"arg": "value"}' - mock_tool_call_obj.id = "call_2" - - responses = [ - {"response": "continuing", "tool_calls": [mock_tool_call_obj]}, - {"response": "final response", "tool_calls": []}, - ] - mock_client.create_completion = AsyncMock(side_effect=responses) - - initial_tool_calls = [ - {"function": {"name": "init_tool", "arguments": "{}"}, "id": "call_1"} - ] - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - result = await _handle_tool_calls( - mock_client, [], initial_tool_calls, "response", 10, False - ) - - assert result == "final response" - # Verify execute_tool was called twice (once for initial, once for loop) - assert mock_tool_manager.execute_tool.call_count == 2 - - @pytest.mark.asyncio - async def test_dict_args_in_loop(self): - """Test dict arguments (not string) in the continuation loop (line 395).""" - mock_context = MagicMock() - mock_tool_manager = AsyncMock() - mock_context.tool_manager = mock_tool_manager - mock_context.model = "gpt-4" - - mock_result = MagicMock() - mock_result.success = True - mock_result.result = "tool result" - mock_tool_manager.execute_tool = AsyncMock(return_value=mock_result) - mock_tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - - # Tool call with 
arguments already as dict - tool_call_dict_args = { - "function": {"name": "test_tool", "arguments": {"key": "value"}}, - "id": "call_2", - } - - responses = [ - {"response": "continuing", "tool_calls": [tool_call_dict_args]}, - {"response": "final response", "tool_calls": []}, - ] - mock_client.create_completion = AsyncMock(side_effect=responses) - - initial_tool_calls = [ - {"function": {"name": "init_tool", "arguments": "{}"}, "id": "call_1"} - ] - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output"): - result = await _handle_tool_calls( - mock_client, [], initial_tool_calls, "response", 10, False - ) - - assert result == "final response" - # Verify the dict args were passed correctly - calls = mock_tool_manager.execute_tool.call_args_list - assert calls[1][0][1] == {"key": "value"} - - @pytest.mark.asyncio - async def test_tool_execution_exception_in_loop(self): - """Test tool execution exception in continuation loop (lines 422-425).""" - mock_context = MagicMock() - mock_tool_manager = AsyncMock() - mock_context.tool_manager = mock_tool_manager - mock_context.model = "gpt-4" - - # First call succeeds, second call in loop raises exception - call_count = [0] - - async def execute_tool_side_effect(name, args): - call_count[0] += 1 - if call_count[0] == 1: - mock_result = MagicMock() - mock_result.success = True - mock_result.result = "success" - return mock_result - else: - raise RuntimeError("Tool execution failed in loop") - - mock_tool_manager.execute_tool = AsyncMock(side_effect=execute_tool_side_effect) - mock_tool_manager.get_tools_for_llm = AsyncMock(return_value=[]) - - mock_client = AsyncMock() - - # First response has more tool calls, second response has no tool calls - tool_call_2 = { - "function": {"name": "test_tool", "arguments": "{}"}, - "id": "call_2", - } - - responses = [ - {"response": "continuing", "tool_calls": [tool_call_2]}, - {"response": "final response", "tool_calls": 
[]}, - ] - mock_client.create_completion = AsyncMock(side_effect=responses) - - initial_tool_calls = [ - {"function": {"name": "init_tool", "arguments": "{}"}, "id": "call_1"} - ] - - with patch("mcp_cli.context.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.cmd.output") as mock_output: - result = await _handle_tool_calls( - mock_client, [], initial_tool_calls, "response", 10, False - ) - - assert result == "final response" - # Verify error was logged - mock_output.error.assert_called() - assert "Tool execution failed" in str(mock_output.error.call_args) diff --git a/tests/commands/actions/test_exit_action.py b/tests/commands/actions/test_exit_action.py deleted file mode 100644 index 0587c28e..00000000 --- a/tests/commands/actions/test_exit_action.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Tests for exit action.""" - -from unittest.mock import patch - -from mcp_cli.commands.actions.exit import exit_action - - -def test_exit_action_interactive(): - """Test exit action in interactive mode.""" - with ( - patch("mcp_cli.commands.actions.exit.output") as mock_output, - patch("mcp_cli.commands.actions.exit.restore_terminal") as mock_restore, - ): - result = exit_action(interactive=True) - - mock_output.info.assert_called_once_with("Exiting… Goodbye!") - mock_restore.assert_called_once() - assert result is True - - -def test_exit_action_non_interactive(): - """Test exit action in non-interactive mode.""" - with ( - patch("mcp_cli.commands.actions.exit.output") as mock_output, - patch("mcp_cli.commands.actions.exit.restore_terminal") as mock_restore, - patch("mcp_cli.commands.actions.exit.sys.exit") as mock_exit, - ): - # This should call sys.exit and not return - exit_action(interactive=False) - - mock_output.info.assert_called_once_with("Exiting… Goodbye!") - mock_restore.assert_called_once() - mock_exit.assert_called_once_with(0) - - -def test_exit_action_default_interactive(): - """Test exit action with default interactive=True.""" - with ( 
- patch("mcp_cli.commands.actions.exit.output"), - patch("mcp_cli.commands.actions.exit.restore_terminal"), - ): - result = exit_action() # Default interactive=True - assert result is True diff --git a/tests/commands/actions/test_help_action.py b/tests/commands/actions/test_help_action.py deleted file mode 100644 index 6500a791..00000000 --- a/tests/commands/actions/test_help_action.py +++ /dev/null @@ -1,271 +0,0 @@ -"""Tests for help action.""" - -from unittest.mock import MagicMock, patch -import pytest - -from mcp_cli.commands.actions.help import ( - help_action, - _get_commands, - _show_command_help, - _show_all_commands, - _extract_description, -) - - -@pytest.fixture -def mock_commands(): - """Create mock command objects.""" - cmd1 = MagicMock() - cmd1.name = "test_command" - cmd1.help = "This is a test command\nUsage: test_command [args]" - cmd1.aliases = ["tc", "test"] - - cmd2 = MagicMock() - cmd2.name = "another_command" - cmd2.help = "Another command description" - cmd2.aliases = [] - - return {"test_command": cmd1, "another_command": cmd2} - - -def test_extract_description(): - """Test _extract_description helper function.""" - # Test None input - assert _extract_description(None) == "No description" - - # Test empty string - assert _extract_description("") == "No description" - - # Test single line - assert _extract_description("Simple description") == "Simple description" - - # Test multiline with usage - help_text = """ - usage: command [args] - This is the real description - More details here - """ - assert _extract_description(help_text) == "This is the real description" - - # Test multiline without usage - help_text = "First line description\nSecond line details" - assert _extract_description(help_text) == "First line description" - - # Test only usage line - assert _extract_description("usage: command") == "No description" - - # Test whitespace handling - assert _extract_description(" \n Real description \n ") == "Real description" - - -def 
test_help_action_show_all_commands(mock_commands): - """Test help_action showing all commands.""" - with ( - patch( - "mcp_cli.commands.actions.help._get_commands", return_value=mock_commands - ), - patch("mcp_cli.commands.actions.help._show_all_commands") as mock_show_all, - ): - help_action() - - mock_show_all.assert_called_once_with(mock_commands) - - -def test_help_action_show_specific_command(mock_commands): - """Test help_action showing specific command.""" - with ( - patch( - "mcp_cli.commands.actions.help._get_commands", return_value=mock_commands - ), - patch("mcp_cli.commands.actions.help._show_command_help") as mock_show_cmd, - ): - help_action("test_command") - - mock_show_cmd.assert_called_once_with("test_command", mock_commands) - - -def test_help_action_with_console_param(mock_commands): - """Test help_action with console parameter (backward compatibility).""" - mock_console = MagicMock() - - with ( - patch( - "mcp_cli.commands.actions.help._get_commands", return_value=mock_commands - ), - patch("mcp_cli.commands.actions.help._show_all_commands") as mock_show_all, - ): - help_action(console=mock_console) - - mock_show_all.assert_called_once_with(mock_commands) - - -def test_get_commands_interactive_registry(): - """Test _get_commands with unified registry.""" - mock_cmd1 = MagicMock() - mock_cmd1.name = "cmd1" - mock_cmd2 = MagicMock() - mock_cmd2.name = "cmd2" - - mock_registry = MagicMock() - mock_registry.list_commands.return_value = [mock_cmd1, mock_cmd2] - - with patch("mcp_cli.commands.actions.help.registry", mock_registry): - result = _get_commands() - - assert result == {"cmd1": mock_cmd1, "cmd2": mock_cmd2} - - -def test_get_commands_cli_registry_list(): - """Test _get_commands with empty list.""" - mock_registry = MagicMock() - mock_registry.list_commands.return_value = [] - - with patch("mcp_cli.commands.actions.help.registry", mock_registry): - result = _get_commands() - - assert result == {} - - -def 
test_get_commands_fallback_commands_attr(): - """Test _get_commands with multiple commands.""" - mock_cmd1 = MagicMock() - mock_cmd1.name = "cmd1" - mock_cmd1.hidden = False - mock_cmd2 = MagicMock() - mock_cmd2.name = "cmd2" - mock_cmd2.hidden = True # Hidden command should not appear - - mock_registry = MagicMock() - mock_registry.list_commands.return_value = [mock_cmd1] # Only non-hidden - - with patch("mcp_cli.commands.actions.help.registry", mock_registry): - result = _get_commands() - - assert result == {"cmd1": mock_cmd1} - - -def test_get_commands_empty_fallback(): - """Test _get_commands when no commands available.""" - mock_registry = MagicMock() - mock_registry.list_commands.return_value = [] - - with patch("mcp_cli.commands.actions.help.registry", mock_registry): - result = _get_commands() - - assert result == {} - - -def test_show_command_help_existing_command(): - """Test _show_command_help with existing command.""" - mock_cmd = MagicMock() - mock_cmd.name = "test_cmd" - mock_cmd.help = "Test command help" - mock_cmd.aliases = ["tc", "test"] - - commands = {"test_cmd": mock_cmd} - - with patch("mcp_cli.commands.actions.help.output") as mock_output: - _show_command_help("test_cmd", commands) - - mock_output.panel.assert_called_once_with( - "## test_cmd\n\nTest command help", title="Command Help", style="cyan" - ) - mock_output.print.assert_called_once_with("\n[dim]Aliases: tc, test[/dim]") - - -def test_show_command_help_no_aliases(): - """Test _show_command_help with command that has no aliases.""" - mock_cmd = MagicMock() - mock_cmd.name = "test_cmd" - mock_cmd.help = "Test command help" - mock_cmd.aliases = [] - - commands = {"test_cmd": mock_cmd} - - with patch("mcp_cli.commands.actions.help.output") as mock_output: - _show_command_help("test_cmd", commands) - - mock_output.panel.assert_called_once() - mock_output.print.assert_not_called() # No aliases to show - - -def test_show_command_help_missing_command(): - """Test _show_command_help with 
non-existent command.""" - commands = {} - - with patch("mcp_cli.commands.actions.help.output") as mock_output: - _show_command_help("nonexistent", commands) - - mock_output.error.assert_called_once_with("Unknown command: nonexistent") - - -def test_show_all_commands(mock_commands): - """Test _show_all_commands with mock commands.""" - with ( - patch("mcp_cli.commands.actions.help.output") as mock_output, - patch("mcp_cli.commands.actions.help.format_table") as mock_format_table, - ): - mock_table = MagicMock() - mock_format_table.return_value = mock_table - - _show_all_commands(mock_commands) - - # Verify table formatting - mock_format_table.assert_called_once() - table_data = mock_format_table.call_args[0][0] - - assert len(table_data) == 2 - assert table_data[0]["Command"] == "another_command" - assert table_data[0]["Aliases"] == "-" - assert table_data[1]["Command"] == "test_command" - assert table_data[1]["Aliases"] == "tc, test" - - mock_output.print_table.assert_called_once_with(mock_table) - # Check that hint was called with the expected messages - hint_calls = mock_output.hint.call_args_list - assert len(hint_calls) == 2 - assert ( - hint_calls[0][0][0] - == "\nType 'help ' for detailed information on a specific command." 
- ) - assert ( - hint_calls[1][0][0] - == "Custom providers need API keys as environment variables:" - ) - - -def test_show_all_commands_empty(): - """Test _show_all_commands with no commands.""" - with patch("mcp_cli.commands.actions.help.output") as mock_output: - _show_all_commands({}) - - mock_output.warning.assert_called_once_with("No commands available") - - -def test_show_command_help_missing_attributes(): - """Test _show_command_help with command missing optional attributes.""" - mock_cmd = MagicMock() - # Simulate missing attributes - mock_cmd.configure_mock( - **{ - "name": "test_cmd", - # No help attribute - # No aliases attribute - } - ) - del mock_cmd.help - del mock_cmd.aliases - - commands = {"test_cmd": mock_cmd} - - with patch("mcp_cli.commands.actions.help.output") as mock_output: - _show_command_help("test_cmd", commands) - - # Should handle missing help gracefully - mock_output.panel.assert_called_once_with( - "## test_cmd\n\nNo description provided.", - title="Command Help", - style="cyan", - ) - # Should not try to print aliases if they don't exist - mock_output.print.assert_not_called() diff --git a/tests/commands/actions/test_models_action.py b/tests/commands/actions/test_models_action.py deleted file mode 100644 index c70f6433..00000000 --- a/tests/commands/actions/test_models_action.py +++ /dev/null @@ -1,450 +0,0 @@ -"""Tests for the models action command.""" - -import pytest -from unittest.mock import AsyncMock, MagicMock, patch - -from mcp_cli.commands.actions.models import ( - model_action_async, - _show_status, - _list_models, - _refresh_models, - _switch_model, - _show_ollama_status, - _check_local_ollama, - model_action, -) -from mcp_cli.commands.models import ModelActionParams - - -@pytest.fixture -def mock_context(): - """Create a mock application context.""" - context = MagicMock() - context.model_manager = MagicMock() - context.model = "test-model" - return context - - -@pytest.fixture -def mock_model_manager(): - """Create a 
mock model manager.""" - manager = MagicMock() - manager.get_active_provider.return_value = "test-provider" - manager.get_active_model.return_value = "test-model" - manager.get_available_models.return_value = ["model1", "model2", "test-model"] - manager.validate_model.return_value = True - manager.refresh_models.return_value = 0 # Returns count of new models - return manager - - -@pytest.mark.asyncio -async def test_model_action_async_no_args(mock_context): - """Test model action with no arguments shows status.""" - with patch( - "mcp_cli.commands.actions.models.get_context", return_value=mock_context - ): - with patch( - "mcp_cli.commands.actions.models._show_status", new_callable=AsyncMock - ) as mock_show: - await model_action_async(ModelActionParams(args=[])) - mock_show.assert_called_once() - - -@pytest.mark.asyncio -async def test_model_action_async_no_model_manager(): - """Test model action when model manager is not available.""" - context = MagicMock() - context.model_manager = None - - with patch("mcp_cli.commands.actions.models.get_context", return_value=context): - with patch("mcp_cli.commands.actions.models.output.error") as mock_error: - await model_action_async(ModelActionParams(args=[])) - mock_error.assert_called_with("Model manager not available") - - -@pytest.mark.asyncio -async def test_model_action_async_list_command(mock_context): - """Test model action with list command.""" - with patch( - "mcp_cli.commands.actions.models.get_context", return_value=mock_context - ): - with patch( - "mcp_cli.commands.actions.models._list_models", new_callable=AsyncMock - ) as mock_list: - await model_action_async(ModelActionParams(args=["list"])) - mock_list.assert_called_once() - - -@pytest.mark.asyncio -async def test_model_action_async_refresh_command(mock_context): - """Test model action with refresh command.""" - with patch( - "mcp_cli.commands.actions.models.get_context", return_value=mock_context - ): - with patch( - 
"mcp_cli.commands.actions.models._refresh_models", new_callable=AsyncMock - ) as mock_refresh: - await model_action_async(ModelActionParams(args=["refresh"])) - mock_refresh.assert_called_once() - - -@pytest.mark.asyncio -async def test_model_action_async_switch_model(mock_context): - """Test model action with model name to switch.""" - with patch( - "mcp_cli.commands.actions.models.get_context", return_value=mock_context - ): - with patch( - "mcp_cli.commands.actions.models._switch_model", new_callable=AsyncMock - ) as mock_switch: - await model_action_async(ModelActionParams(args=["new-model"])) - mock_switch.assert_called_once() - - -@pytest.mark.asyncio -async def test_show_status_with_models(mock_model_manager): - """Test showing status with available models.""" - with patch("mcp_cli.commands.actions.models.output") as mock_output: - await _show_status(mock_model_manager, "test-model", "test-provider") - - # Verify output calls - mock_output.rule.assert_called_once() - assert mock_output.print.call_count > 0 - mock_output.success.assert_called() # For current model - mock_output.tip.assert_called_once() - - -@pytest.mark.asyncio -async def test_show_status_no_models(mock_model_manager): - """Test showing status with no available models.""" - mock_model_manager.get_available_models.return_value = [] - - with patch("mcp_cli.commands.actions.models.output") as mock_output: - await _show_status(mock_model_manager, "test-model", "test-provider") - - mock_output.warning.assert_called_with( - " ⚠️ No models found for current provider" - ) - - -@pytest.mark.asyncio -async def test_show_status_ollama_provider(mock_model_manager): - """Test showing status for Ollama provider.""" - with patch("mcp_cli.commands.actions.models.output"): - with patch( - "mcp_cli.commands.actions.models._show_ollama_status", - new_callable=AsyncMock, - ) as mock_ollama: - await _show_status(mock_model_manager, "test-model", "ollama") - mock_ollama.assert_called_once() - - 
-@pytest.mark.asyncio -async def test_show_status_many_models(mock_model_manager): - """Test showing status with more than 10 models.""" - mock_model_manager.get_available_models.return_value = [ - f"model{i}" for i in range(15) - ] - - with patch("mcp_cli.commands.actions.models.output") as mock_output: - await _show_status(mock_model_manager, "model0", "test-provider") - - # Should show "... and X more" message - calls = [str(call) for call in mock_output.print.call_args_list] - assert any("... and 5 more" in str(call) for call in calls) - - -@pytest.mark.asyncio -async def test_list_models_with_models(mock_model_manager): - """Test listing models when models are available.""" - with patch("mcp_cli.commands.actions.models.output") as mock_output: - with patch("mcp_cli.commands.actions.models.format_table") as mock_format_table: - mock_format_table.return_value = "formatted_table" - - await _list_models(mock_model_manager, "test-provider", "test-model") - - mock_format_table.assert_called_once() - mock_output.print_table.assert_called_with("formatted_table") - mock_output.tip.assert_called_once() - - -@pytest.mark.asyncio -async def test_list_models_no_models(mock_model_manager): - """Test listing models when no models are available.""" - mock_model_manager.get_available_models.return_value = [] - - with patch("mcp_cli.commands.actions.models.output") as mock_output: - await _list_models(mock_model_manager, "test-provider", "test-model") - - mock_output.error.assert_called_with( - "No models found for provider 'test-provider'" - ) - - -@pytest.mark.asyncio -async def test_list_models_ollama_provider(mock_model_manager): - """Test listing models for Ollama provider.""" - with patch("mcp_cli.commands.actions.models.output"): - with patch("mcp_cli.commands.actions.models.format_table") as mock_format_table: - mock_format_table.return_value = "formatted_table" - with patch( - "mcp_cli.commands.actions.models._check_local_ollama", - new_callable=AsyncMock, - ) as 
mock_check: - mock_check.return_value = (True, ["local-model1", "local-model2"]) - - await _list_models(mock_model_manager, "ollama", "test-model") - - mock_check.assert_called_once() - mock_format_table.assert_called_once() - - -@pytest.mark.asyncio -async def test_list_models_with_static_models(mock_model_manager): - """Test listing models with static model configuration.""" - mock_model_manager.get_provider_info.return_value = {"models": ["model1", "model2"]} - - with patch("mcp_cli.commands.actions.models.output"): - with patch("mcp_cli.commands.actions.models.format_table") as mock_format_table: - mock_format_table.return_value = "formatted_table" - - await _list_models(mock_model_manager, "test-provider", "test-model") - - # Verify table was called with correct data - call_args = mock_format_table.call_args[0][0] - assert any( - row["Type"] == "Static" - for row in call_args - if row["Model"] in ["model1", "model2"] - ) - - -@pytest.mark.asyncio -async def test_refresh_models_success(mock_model_manager): - """Test successful model refresh.""" - # Mock refresh_models to return 2 new models - mock_model_manager.refresh_models.return_value = 2 - # After refresh, there are 4 models total - mock_model_manager.get_available_models.return_value = [ - "model1", - "model2", - "model3", - "model4", - ] - - with patch("mcp_cli.commands.actions.models.output") as mock_output: - await _refresh_models(mock_model_manager, "test-provider") - - mock_output.success.assert_called_with("Discovered 2 new models!") - assert any( - "Total models: 4" in str(call) for call in mock_output.print.call_args_list - ) - - -@pytest.mark.asyncio -async def test_refresh_models_no_new_models(mock_model_manager): - """Test refresh with no new models discovered.""" - mock_model_manager.refresh_models.return_value = 0 # No new models - mock_model_manager.get_available_models.return_value = ["model1", "model2"] - - with patch("mcp_cli.commands.actions.models.output") as mock_output: - await 
_refresh_models(mock_model_manager, "test-provider") - - mock_output.info.assert_called_with("No new models discovered") - - -@pytest.mark.asyncio -async def test_refresh_models_exception(mock_model_manager): - """Test refresh with exception.""" - mock_model_manager.refresh_models.side_effect = Exception("Test error") - mock_model_manager.get_available_models.return_value = ["model1", "model2"] - - with patch("mcp_cli.commands.actions.models.output") as mock_output: - await _refresh_models(mock_model_manager, "test-provider") - - mock_output.error.assert_called_with("Refresh error: Test error") - - -@pytest.mark.asyncio -async def test_switch_model_success(mock_model_manager, mock_context): - """Test successful model switch.""" - with patch("mcp_cli.commands.actions.models.output") as mock_output: - with patch("mcp_cli.commands.actions.models.LLMProbe") as mock_probe_class: - mock_probe = AsyncMock() - mock_probe.test_model.return_value = MagicMock(success=True) - mock_probe_class.return_value.__aenter__.return_value = mock_probe - - await _switch_model( - "new-model", - mock_model_manager, - "test-provider", - "old-model", - mock_context, - ) - - # New API: validate_model(model, provider) - note swapped args - mock_model_manager.validate_model.assert_called_with( - "new-model", "test-provider" - ) - # New API: switch_model(provider, model) - mock_model_manager.switch_model.assert_called_with( - "test-provider", "new-model" - ) - assert mock_context.model == "new-model" - mock_output.success.assert_called_with("Switched to model: new-model") - - -@pytest.mark.asyncio -async def test_switch_model_invalid(mock_model_manager, mock_context): - """Test switching to invalid model.""" - mock_model_manager.validate_model.return_value = False - - with patch("mcp_cli.commands.actions.models.output") as mock_output: - await _switch_model( - "invalid-model", - mock_model_manager, - "test-provider", - "old-model", - mock_context, - ) - - 
mock_output.error.assert_called_with("Model not available: invalid-model") - mock_output.tip.assert_called() - - -@pytest.mark.asyncio -async def test_switch_model_test_failure(mock_model_manager, mock_context): - """Test model switch when test fails.""" - with patch("mcp_cli.commands.actions.models.output") as mock_output: - with patch("mcp_cli.commands.actions.models.LLMProbe") as mock_probe_class: - mock_probe = AsyncMock() - mock_probe.test_model.return_value = MagicMock( - success=False, error_message="Connection failed" - ) - mock_probe_class.return_value.__aenter__.return_value = mock_probe - - await _switch_model( - "new-model", - mock_model_manager, - "test-provider", - "old-model", - mock_context, - ) - - mock_output.error.assert_called_with("Model test failed: Connection failed") - mock_output.warning.assert_called_with("Keeping current model: old-model") - - -@pytest.mark.asyncio -async def test_switch_model_exception(mock_model_manager, mock_context): - """Test model switch with exception.""" - with patch("mcp_cli.commands.actions.models.output") as mock_output: - with patch("mcp_cli.commands.actions.models.LLMProbe") as mock_probe_class: - mock_probe_class.side_effect = Exception("Test error") - - await _switch_model( - "new-model", - mock_model_manager, - "test-provider", - "old-model", - mock_context, - ) - - mock_output.error.assert_called_with("Model switch failed: Test error") - mock_output.warning.assert_called_with("Keeping current model: old-model") - - -@pytest.mark.asyncio -async def test_show_ollama_status_running(mock_model_manager): - """Test showing Ollama status when running.""" - with patch("mcp_cli.commands.actions.models.output") as mock_output: - with patch( - "mcp_cli.commands.actions.models._check_local_ollama", - new_callable=AsyncMock, - ) as mock_check: - mock_check.return_value = (True, ["model1", "model2"]) - - await _show_ollama_status(mock_model_manager) - - mock_output.info.assert_called() - assert "Discovery: ✅" in 
mock_output.info.call_args[0][0] - - -@pytest.mark.asyncio -async def test_show_ollama_status_not_running(mock_model_manager): - """Test showing Ollama status when not running.""" - with patch("mcp_cli.commands.actions.models.output") as mock_output: - with patch( - "mcp_cli.commands.actions.models._check_local_ollama", - new_callable=AsyncMock, - ) as mock_check: - mock_check.return_value = (False, []) - - await _show_ollama_status(mock_model_manager) - - mock_output.hint.assert_called_with( - "\nOllama: Not running | Use 'ollama serve' to start" - ) - - -@pytest.mark.asyncio -async def test_show_ollama_status_exception(mock_model_manager): - """Test showing Ollama status with exception.""" - with patch("mcp_cli.commands.actions.models.output"): - with patch( - "mcp_cli.commands.actions.models._check_local_ollama", - new_callable=AsyncMock, - ) as mock_check: - mock_check.side_effect = Exception("Test error") - - # Should not raise exception - await _show_ollama_status(mock_model_manager) - - -@pytest.mark.asyncio -async def test_check_local_ollama_success(): - """Test checking local Ollama when it's running.""" - with patch("httpx.AsyncClient") as mock_client_class: - mock_client = AsyncMock() - mock_response = MagicMock() - mock_response.json.return_value = { - "models": [{"name": "model1"}, {"name": "model2"}] - } - mock_client.get.return_value = mock_response - mock_client_class.return_value.__aenter__.return_value = mock_client - - running, models = await _check_local_ollama() - - assert running is True - assert models == ["model1", "model2"] - - -@pytest.mark.asyncio -async def test_check_local_ollama_failure(): - """Test checking local Ollama when it's not running.""" - with patch("httpx.AsyncClient") as mock_client_class: - mock_client = AsyncMock() - mock_client.get.side_effect = Exception("Connection refused") - mock_client_class.return_value.__aenter__.return_value = mock_client - - running, models = await _check_local_ollama() - - assert running is 
False - assert models == [] - - -def test_model_action_sync(): - """Test synchronous wrapper for model action.""" - with patch("mcp_cli.commands.actions.models.run_blocking") as mock_run: - with patch( - "mcp_cli.commands.actions.models.model_action_async", new_callable=AsyncMock - ) as mock_async: - args = ["test", "args"] - model_action(args) - - # Verify async function was called with ModelActionParams - mock_async.assert_called_once() - call_args = mock_async.call_args[0][0] - assert call_args.args == args - # Verify run_blocking was called with the coroutine - mock_run.assert_called_once() diff --git a/tests/commands/actions/test_ping_action.py b/tests/commands/actions/test_ping_action.py deleted file mode 100644 index 04d38e75..00000000 --- a/tests/commands/actions/test_ping_action.py +++ /dev/null @@ -1,312 +0,0 @@ -"""Tests for the ping action command.""" - -import asyncio -import pytest -from unittest.mock import AsyncMock, MagicMock, patch - -from mcp_cli.commands.actions.ping import ( - ping_action_async, - _ping_server, - _get_server_name, - _matches_target, - _display_results, - ping_action, -) - - -@pytest.fixture -def mock_tool_manager(): - """Create a mock tool manager.""" - tm = MagicMock() - # Create mock streams - read_stream1 = MagicMock() - write_stream1 = MagicMock() - read_stream2 = MagicMock() - write_stream2 = MagicMock() - - tm.get_streams.return_value = [ - (read_stream1, write_stream1), - (read_stream2, write_stream2), - ] - - # Mock server info - server_info1 = MagicMock() - server_info1.name = "server1" - server_info2 = MagicMock() - server_info2.name = "server2" - - tm.get_server_info = AsyncMock(return_value=[server_info1, server_info2]) - - return tm - - -@pytest.mark.asyncio -async def test_ping_action_async_all_servers(mock_tool_manager): - """Test pinging all servers.""" - with patch( - "mcp_cli.commands.actions.ping._ping_server", new_callable=AsyncMock - ) as mock_ping: - mock_ping.return_value = ("server1", True, 10.5) - - 
with patch("mcp_cli.commands.actions.ping._display_results") as mock_display: - result = await ping_action_async(mock_tool_manager) - - assert result is True - assert mock_ping.call_count == 2 - mock_display.assert_called_once() - - -@pytest.mark.asyncio -async def test_ping_action_async_with_targets(mock_tool_manager): - """Test pinging specific target servers.""" - with patch( - "mcp_cli.commands.actions.ping._ping_server", new_callable=AsyncMock - ) as mock_ping: - mock_ping.return_value = ("server1", True, 10.5) - - with patch("mcp_cli.commands.actions.ping._display_results") as mock_display: - result = await ping_action_async(mock_tool_manager, targets=["server1"]) - - assert result is True - assert mock_ping.call_count == 1 - mock_display.assert_called_once() - - -@pytest.mark.asyncio -async def test_ping_action_async_no_matching_servers(mock_tool_manager): - """Test when no servers match the target.""" - with patch("mcp_cli.commands.actions.ping.output") as mock_output: - result = await ping_action_async(mock_tool_manager, targets=["nonexistent"]) - - assert result is False - mock_output.error.assert_called_with("No matching servers found") - mock_output.hint.assert_called_with( - "Use 'servers' command to list available servers" - ) - - -@pytest.mark.asyncio -async def test_ping_action_async_with_server_names(mock_tool_manager): - """Test pinging with explicit server names.""" - server_names = {0: "custom-name-1", 1: "custom-name-2"} - - with patch( - "mcp_cli.commands.actions.ping._ping_server", new_callable=AsyncMock - ) as mock_ping: - mock_ping.side_effect = [ - ("custom-name-1", True, 10.5), - ("custom-name-2", False, 100.0), - ] - - with patch("mcp_cli.commands.actions.ping._display_results"): - result = await ping_action_async( - mock_tool_manager, server_names=server_names - ) - - assert result is True - assert mock_ping.call_count == 2 - # Verify custom names were used - first_call = mock_ping.call_args_list[0] - assert first_call[0][1] == 
"custom-name-1" - - -@pytest.mark.asyncio -async def test_ping_server_success(): - """Test successful server ping.""" - read_stream = MagicMock() - write_stream = MagicMock() - - with patch( - "mcp_cli.commands.actions.ping.send_ping", new_callable=AsyncMock - ) as mock_send: - mock_send.return_value = True - - name, success, latency = await _ping_server( - 0, "test-server", read_stream, write_stream - ) - - assert name == "test-server" - assert success is True - assert latency > 0 - - -@pytest.mark.asyncio -async def test_ping_server_timeout(): - """Test server ping timeout.""" - read_stream = MagicMock() - write_stream = MagicMock() - - with patch( - "mcp_cli.commands.actions.ping.send_ping", new_callable=AsyncMock - ) as mock_send: - mock_send.side_effect = asyncio.TimeoutError() - - name, success, latency = await _ping_server( - 0, "test-server", read_stream, write_stream, timeout=0.1 - ) - - assert name == "test-server" - assert success is False - assert latency > 0 - - -@pytest.mark.asyncio -async def test_ping_server_exception(): - """Test server ping with exception.""" - read_stream = MagicMock() - write_stream = MagicMock() - - with patch( - "mcp_cli.commands.actions.ping.send_ping", new_callable=AsyncMock - ) as mock_send: - mock_send.side_effect = Exception("Connection error") - - name, success, latency = await _ping_server( - 0, "test-server", read_stream, write_stream - ) - - assert name == "test-server" - assert success is False - assert latency > 0 - - -def test_get_server_name_explicit(): - """Test getting server name with explicit names.""" - explicit_names = {0: "custom-name", 1: "another-name"} - server_infos = [] - - name = _get_server_name(0, explicit_names, server_infos) - assert name == "custom-name" - - -def test_get_server_name_from_info(): - """Test getting server name from server info.""" - server_info = MagicMock() - server_info.name = "info-name" - server_infos = [server_info] - - name = _get_server_name(0, None, server_infos) - assert 
name == "info-name" - - -def test_get_server_name_fallback(): - """Test getting server name with fallback.""" - name = _get_server_name(5, None, []) - assert name == "server-5" - - -def test_matches_target_by_index(): - """Test matching target by server index.""" - assert _matches_target(0, "server-name", ["0"]) is True - assert _matches_target(1, "server-name", ["0"]) is False - - -def test_matches_target_by_name(): - """Test matching target by server name.""" - assert _matches_target(0, "test-server", ["test-server"]) is True - assert _matches_target(0, "test-server", ["TEST-SERVER"]) is True - assert _matches_target(0, "test-server", ["other-server"]) is False - - -def test_matches_target_multiple(): - """Test matching with multiple targets.""" - assert _matches_target(0, "test-server", ["0", "test-server"]) is True - assert _matches_target(1, "other-server", ["test-server", "other-server"]) is True - assert _matches_target(2, "third-server", ["test-server", "other-server"]) is False - - -def test_display_results_all_online(): - """Test displaying results with all servers online.""" - results = [ - ("server1", True, 10.5), - ("server2", True, 20.3), - ("server3", True, 15.7), - ] - - with patch("mcp_cli.commands.actions.ping.output") as mock_output: - with patch("mcp_cli.commands.actions.ping.format_table") as mock_format_table: - mock_format_table.return_value = "formatted_table" - - _display_results(results) - - mock_format_table.assert_called_once() - mock_output.print_table.assert_called_with("formatted_table") - mock_output.success.assert_called_with("3/3 servers online") - mock_output.info.assert_called() # Average latency - - # Check table data - table_data = mock_format_table.call_args[0][0] - assert len(table_data) == 3 - assert all(row["Status"] == "✓ Online" for row in table_data) - - -def test_display_results_mixed(): - """Test displaying results with mixed online/offline servers.""" - results = [ - ("server1", True, 10.5), - ("server2", False, 
100.0), - ("server3", True, 15.7), - ] - - with patch("mcp_cli.commands.actions.ping.output") as mock_output: - with patch("mcp_cli.commands.actions.ping.format_table") as mock_format_table: - mock_format_table.return_value = "formatted_table" - - _display_results(results) - - mock_output.success.assert_called_with("2/3 servers online") - - # Check table data - table_data = mock_format_table.call_args[0][0] - assert table_data[1]["Status"] == "✗ Offline" - assert table_data[1]["Latency"] == "-" - - -def test_display_results_all_offline(): - """Test displaying results with all servers offline.""" - results = [("server1", False, 100.0), ("server2", False, 100.0)] - - with patch("mcp_cli.commands.actions.ping.output") as mock_output: - with patch("mcp_cli.commands.actions.ping.format_table") as mock_format_table: - mock_format_table.return_value = "formatted_table" - - _display_results(results) - - mock_output.error.assert_called_with("All servers are offline") - # Should not call success or info - mock_output.success.assert_not_called() - mock_output.info.assert_not_called() - - -def test_display_results_sorted(): - """Test that results are sorted by server name.""" - results = [("zebra", True, 10.0), ("alpha", True, 20.0), ("beta", True, 15.0)] - - with patch("mcp_cli.commands.actions.ping.output"): - with patch("mcp_cli.commands.actions.ping.format_table") as mock_format_table: - mock_format_table.return_value = "formatted_table" - - _display_results(results) - - # Check that table data is sorted - table_data = mock_format_table.call_args[0][0] - server_names = [row["Server"] for row in table_data] - assert server_names == ["alpha", "beta", "zebra"] - - -def test_ping_action_sync(): - """Test synchronous wrapper for ping action.""" - tm = MagicMock() - server_names = {0: "test"} - targets = ["test"] - - with patch("mcp_cli.commands.actions.ping.run_blocking") as mock_run: - with patch( - "mcp_cli.commands.actions.ping.ping_action_async", new_callable=AsyncMock - ) 
as mock_async: - ping_action(tm, server_names=server_names, targets=targets) - - mock_async.assert_called_with( - tm, server_names=server_names, targets=targets - ) - mock_run.assert_called_once() diff --git a/tests/commands/actions/test_prompts_action.py b/tests/commands/actions/test_prompts_action.py deleted file mode 100644 index d48db477..00000000 --- a/tests/commands/actions/test_prompts_action.py +++ /dev/null @@ -1,165 +0,0 @@ -"""Tests for prompts action.""" - -from unittest.mock import AsyncMock, MagicMock, patch -import pytest - -from mcp_cli.commands.actions.prompts import ( - prompts_action_async, - prompts_action, - prompts_action_cmd, -) -from mcp_cli.commands.models import PromptInfoResponse - - -@pytest.fixture -def mock_context(): - """Create a mock context with tool manager.""" - context = MagicMock() - tool_manager = MagicMock() - context.tool_manager = tool_manager - return context, tool_manager - - -@pytest.fixture -def sample_prompts(): - """Create sample prompt data.""" - return [ - { - "server": "test_server", - "name": "test_prompt", - "description": "A test prompt", - }, - { - "server": "another_server", - "name": "another_prompt", - "description": "Another prompt", - }, - ] - - -@pytest.mark.asyncio -async def test_prompts_action_async_basic(mock_context, sample_prompts): - """Test basic prompts action async.""" - context, tool_manager = mock_context - tool_manager.list_prompts.return_value = sample_prompts - - with ( - patch("mcp_cli.commands.actions.prompts.get_context", return_value=context), - patch("mcp_cli.commands.actions.prompts.output") as mock_output, - patch("mcp_cli.commands.actions.prompts.format_table") as mock_format_table, - ): - mock_table = MagicMock() - mock_format_table.return_value = mock_table - - result = await prompts_action_async() - - # Verify table creation and output - mock_format_table.assert_called_once() - mock_output.print_table.assert_called_once_with(mock_table) - - # Verify result - should be Pydantic 
models - assert len(result) == len(sample_prompts) - assert all(isinstance(p, PromptInfoResponse) for p in result) - assert result[0].name == "test_prompt" - assert result[0].server == "test_server" - assert result[1].name == "another_prompt" - assert result[1].server == "another_server" - - -@pytest.mark.asyncio -async def test_prompts_action_async_no_tool_manager(): - """Test prompts action when no tool manager available.""" - context = MagicMock() - context.tool_manager = None - - with ( - patch("mcp_cli.commands.actions.prompts.get_context", return_value=context), - patch("mcp_cli.commands.actions.prompts.output") as mock_output, - ): - result = await prompts_action_async() - - mock_output.error.assert_called_once_with("No tool manager available") - assert result == [] - - -@pytest.mark.asyncio -async def test_prompts_action_async_no_prompts(mock_context): - """Test prompts action when no prompts available.""" - context, tool_manager = mock_context - tool_manager.list_prompts.return_value = [] - - with ( - patch("mcp_cli.commands.actions.prompts.get_context", return_value=context), - patch("mcp_cli.commands.actions.prompts.output") as mock_output, - ): - result = await prompts_action_async() - - mock_output.info.assert_called_once_with("No prompts recorded.") - assert result == [] - - -@pytest.mark.asyncio -async def test_prompts_action_async_exception(mock_context): - """Test prompts action when exception occurs.""" - context, tool_manager = mock_context - tool_manager.list_prompts.side_effect = Exception("Test error") - - with ( - patch("mcp_cli.commands.actions.prompts.get_context", return_value=context), - patch("mcp_cli.commands.actions.prompts.output") as mock_output, - ): - result = await prompts_action_async() - - mock_output.error.assert_called_once_with("Test error") - assert result == [] - - -@pytest.mark.asyncio -async def test_prompts_action_async_awaitable_result(mock_context, sample_prompts): - """Test prompts action when list_prompts returns an 
awaitable.""" - context, tool_manager = mock_context - - # Make list_prompts return a coroutine - async def async_list_prompts(): - return sample_prompts - - tool_manager.list_prompts.return_value = async_list_prompts() - - with ( - patch("mcp_cli.commands.actions.prompts.get_context", return_value=context), - patch("mcp_cli.commands.actions.prompts.output"), - patch("mcp_cli.commands.actions.prompts.format_table") as mock_format_table, - ): - mock_format_table.return_value = MagicMock() - - result = await prompts_action_async() - - # Verify result - should be Pydantic models - assert len(result) == len(sample_prompts) - assert all(isinstance(p, PromptInfoResponse) for p in result) - - -def test_prompts_action_sync_wrapper(): - """Test the sync wrapper function.""" - with patch("mcp_cli.commands.actions.prompts.run_blocking") as mock_run_blocking: - mock_run_blocking.return_value = [] - - result = prompts_action() - - mock_run_blocking.assert_called_once() - assert result == [] - - -@pytest.mark.asyncio -async def test_prompts_action_cmd_alias(): - """Test the cmd alias function.""" - with patch( - "mcp_cli.commands.actions.prompts.prompts_action_async", - new_callable=AsyncMock, - ) as mock_prompts_action: - mock_prompts_action.return_value = [] - - result = await prompts_action_cmd() - - mock_prompts_action.assert_called_once() - assert result == [] diff --git a/tests/commands/actions/test_providers_action.py b/tests/commands/actions/test_providers_action.py deleted file mode 100644 index 720becea..00000000 --- a/tests/commands/actions/test_providers_action.py +++ /dev/null @@ -1,459 +0,0 @@ -"""Tests for the providers action command.""" - -import pytest -import subprocess -from unittest.mock import AsyncMock, MagicMock, patch - -from mcp_cli.commands.actions.providers import ( - provider_action_async, - _check_ollama_running, - _get_provider_status_enhanced, - _get_model_count_display_enhanced, - _get_features_display_enhanced, - _render_list_optimized, - 
_render_diagnostic_optimized, - _switch_provider_enhanced, - provider_action, -) -from mcp_cli.commands.models import ProviderActionParams - - -@pytest.fixture -def mock_context(): - """Create a mock application context.""" - context = MagicMock() - context.model_manager = MagicMock() - context.provider = "test-provider" - context.model = "test-model" - return context - - -@pytest.fixture -def mock_model_manager(): - """Create a mock model manager.""" - manager = MagicMock() - manager.get_active_provider.return_value = "test-provider" - manager.get_active_provider_and_model.return_value = ("test-provider", "test-model") - manager.list_available_providers.return_value = { - "test-provider": { - "has_api_key": True, - "models": ["model1", "model2"], - "default_model": "model1", - "baseline_features": ["streaming", "tools"], - }, - "ollama": { - "models": ["llama2", "codellama"], - "default_model": "llama2", - "baseline_features": ["streaming"], - }, - } - manager.get_status_summary.return_value = { - "supports_streaming": True, - "supports_tools": True, - "supports_vision": False, - } - manager.list_providers.return_value = ["test-provider", "ollama", "openai"] - manager.validate_provider.return_value = True - manager.get_available_models.return_value = ["model1", "model2"] - manager.get_default_model.return_value = "model1" - return manager - - -def test_check_ollama_running_success(): - """Test checking if Ollama is running successfully.""" - mock_result = MagicMock() - mock_result.returncode = 0 - mock_result.stdout = "NAME\nllama2:latest\ncodellama:latest\n" - - with patch("subprocess.run", return_value=mock_result): - is_running, count = _check_ollama_running() - - assert is_running is True - assert count == 2 - - -def test_check_ollama_running_not_found(): - """Test checking Ollama when it's not found.""" - with patch("subprocess.run", side_effect=FileNotFoundError()): - is_running, count = _check_ollama_running() - - assert is_running is False - assert count 
== 0 - - -def test_check_ollama_running_timeout(): - """Test checking Ollama with timeout.""" - with patch("subprocess.run", side_effect=subprocess.TimeoutExpired("ollama", 5)): - is_running, count = _check_ollama_running() - - assert is_running is False - assert count == 0 - - -def test_check_ollama_running_failed(): - """Test checking Ollama when command fails.""" - mock_result = MagicMock() - mock_result.returncode = 1 - - with patch("subprocess.run", return_value=mock_result): - is_running, count = _check_ollama_running() - - assert is_running is False - assert count == 0 - - -def test_get_provider_status_enhanced_ollama_running(): - """Test getting enhanced status for Ollama when running.""" - from mcp_cli.commands.models.provider import ProviderData - - with patch( - "mcp_cli.commands.actions.providers._check_ollama_running", - return_value=(True, 3), - ): - provider_data = ProviderData(name="ollama") - status = _get_provider_status_enhanced("ollama", provider_data) - - assert status.icon == "✅" - assert status.text == "Ready" - assert "Running (3 models)" in status.reason - - -def test_get_provider_status_enhanced_ollama_not_running(): - """Test getting enhanced status for Ollama when not running.""" - from mcp_cli.commands.models.provider import ProviderData - - with patch( - "mcp_cli.commands.actions.providers._check_ollama_running", - return_value=(False, 0), - ): - provider_data = ProviderData(name="ollama") - status = _get_provider_status_enhanced("ollama", provider_data) - - assert status.icon == "❌" - assert status.text == "Not Running" - assert "Ollama service not accessible" in status.reason - - -def test_get_provider_status_enhanced_api_provider_ready(): - """Test getting enhanced status for API provider that's ready.""" - from mcp_cli.commands.models.provider import ProviderData - - provider_data = ProviderData( - name="openai", - has_api_key=True, - models=["model1", "model2"], - ) - status = _get_provider_status_enhanced("openai", provider_data) 
- - assert status.icon == "✅" - assert status.text == "Ready" - assert "Configured (2 models)" in status.reason - - -def test_get_provider_status_enhanced_api_provider_no_key(): - """Test getting enhanced status for API provider without key.""" - from mcp_cli.commands.models.provider import ProviderData - - provider_data = ProviderData(name="openai", has_api_key=False) - status = _get_provider_status_enhanced("openai", provider_data) - - assert status.icon == "❌" - assert status.text == "Not Configured" - assert "No API key" in status.reason - - -def test_get_provider_status_enhanced_api_provider_partial(): - """Test getting enhanced status for API provider with key but no models.""" - from mcp_cli.commands.models.provider import ProviderData - - provider_data = ProviderData(name="openai", has_api_key=True, models=[]) - status = _get_provider_status_enhanced("openai", provider_data) - icon, text, reason = status.icon, status.text, status.reason - - assert icon == "⚠️" - assert text == "Partial Setup" - assert "API key set but no models found" in reason - - -def test_get_model_count_display_enhanced_ollama(): - """Test getting model count display for Ollama.""" - with patch( - "mcp_cli.commands.actions.providers._check_ollama_running", - return_value=(True, 5), - ): - display = _get_model_count_display_enhanced("ollama", {}) - assert display == "5 models" - - -def test_get_model_count_display_enhanced_ollama_not_running(): - """Test getting model count display for Ollama when not running.""" - with patch( - "mcp_cli.commands.actions.providers._check_ollama_running", - return_value=(False, 0), - ): - display = _get_model_count_display_enhanced("ollama", {}) - assert display == "Ollama not running" - - -def test_get_model_count_display_enhanced_api_provider(): - """Test getting model count display.""" - from mcp_cli.commands.models.provider import ProviderData - - provider_data = ProviderData(name="openai", models=["model1", "model2", "model3"]) - display = 
_get_model_count_display_enhanced("openai", provider_data) - assert display == "3 models" - - -def test_get_model_count_display_enhanced_no_models(): - """Test getting model count display.""" - from mcp_cli.commands.models.provider import ProviderData - - provider_data = ProviderData(name="openai", models=[]) - display = _get_model_count_display_enhanced("openai", provider_data) - assert display == "No models found" - - -def test_get_model_count_display_enhanced_single_model(): - """Test getting model count display.""" - from mcp_cli.commands.models.provider import ProviderData - - provider_data = ProviderData(name="openai", models=["model1"]) - display = _get_model_count_display_enhanced("openai", provider_data) - assert display == "1 model" - - -def test_get_model_count_display_enhanced_fallback(): - """Test getting model count display.""" - from mcp_cli.commands.models.provider import ProviderData - - provider_data = ProviderData(name="openai", available_models=["model1", "model2"]) - display = _get_model_count_display_enhanced("openai", provider_data) - assert display == "2 models" - - -def test_get_features_display_enhanced_all_features(): - """Test getting features display.""" - from mcp_cli.commands.models.provider import ProviderData - - provider_data = ProviderData( - name="openai", - baseline_features=["streaming", "tools", "vision", "reasoning", "json_mode"], - ) - display = _get_features_display_enhanced(provider_data) - assert "📡" in display # streaming - assert "🔧" in display # tools - assert "👁️" in display # vision - assert "🧠" in display # reasoning - assert "📝" in display # json_mode - - -def test_get_features_display_enhanced_no_features(): - """Test getting features display.""" - from mcp_cli.commands.models.provider import ProviderData - - provider_data = ProviderData(name="openai", baseline_features=[]) - display = _get_features_display_enhanced(provider_data) - assert display == "📄" - - -def test_get_features_display_enhanced_some_features(): 
- """Test getting features display.""" - from mcp_cli.commands.models.provider import ProviderData - - provider_data = ProviderData( - name="openai", baseline_features=["streaming", "tools"] - ) - display = _get_features_display_enhanced(provider_data) - assert "📡" in display - assert "🔧" in display - assert "👁️" not in display - - -def test_render_list_optimized(mock_model_manager): - """Test rendering optimized provider list.""" - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - with patch( - "mcp_cli.commands.actions.providers.format_table" - ) as mock_format_table: - mock_format_table.return_value = "formatted_table" - - _render_list_optimized(mock_model_manager) - - mock_output.rule.assert_called_once() - mock_format_table.assert_called_once() - mock_output.print_table.assert_called_with("formatted_table") - mock_output.tip.assert_called() - - -def test_render_diagnostic_optimized_specific_provider(mock_model_manager): - """Test rendering diagnostic for specific provider.""" - with patch("mcp_cli.commands.actions.providers.output"): - with patch( - "mcp_cli.commands.actions.providers.format_table" - ) as mock_format_table: - mock_format_table.return_value = "formatted_table" - - _render_diagnostic_optimized(mock_model_manager, "test-provider") - - mock_format_table.assert_called_once() - table_data = mock_format_table.call_args[0][0] - assert len(table_data) == 1 - assert table_data[0]["Provider"] == "test-provider" - - -def test_render_diagnostic_optimized_unknown_provider(mock_model_manager): - """Test rendering diagnostic for unknown provider.""" - mock_model_manager.validate_provider.return_value = False - - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _render_diagnostic_optimized(mock_model_manager, "unknown") - - mock_output.error.assert_called_with("Unknown provider: unknown") - mock_output.warning.assert_called() - - -def test_switch_provider_enhanced_success(mock_model_manager, mock_context): - 
"""Test successful provider switch.""" - mock_model_manager.validate_provider.return_value = True - mock_model_manager.list_available_providers.return_value = { - "openai": { - "has_api_key": True, # Fixed: changed from has_key to has_api_key - "status": "available", - "models": ["gpt-4", "gpt-3.5-turbo"], - } - } - - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _switch_provider_enhanced(mock_model_manager, "openai", "gpt-4", mock_context) - - mock_model_manager.switch_model.assert_called_once_with("openai", "gpt-4") - assert mock_context.provider == "openai" - assert mock_context.model == "gpt-4" - mock_output.success.assert_called() - - -def test_switch_provider_enhanced_invalid_provider(mock_model_manager, mock_context): - """Test switching to invalid provider.""" - mock_model_manager.validate_provider.return_value = False - - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _switch_provider_enhanced(mock_model_manager, "invalid", None, mock_context) - - mock_output.error.assert_called_with("Unknown provider: invalid") - mock_model_manager.switch_model.assert_not_called() - - -def test_switch_provider_enhanced_ollama_not_running(mock_model_manager, mock_context): - """Test switching to Ollama when it's not running.""" - # Override the default mock to return proper ollama structure - mock_model_manager.get_available_providers.return_value = { - "ollama": { - "models": [], # Empty because Ollama is not running - } - } - - with patch( - "mcp_cli.commands.actions.providers._check_ollama_running", - return_value=(False, 0), - ): - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _switch_provider_enhanced(mock_model_manager, "ollama", None, mock_context) - - mock_output.error.assert_called() - # Check that tip was called with the Ollama start message - assert any( - "ollama serve" in str(call) for call in mock_output.tip.call_args_list - ) - - -@pytest.mark.asyncio -async def 
test_provider_action_async_no_args(mock_context, mock_model_manager): - """Test provider action with no arguments shows status.""" - mock_context.model_manager = mock_model_manager - with patch( - "mcp_cli.commands.actions.providers.get_context", return_value=mock_context - ): - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - await provider_action_async(ProviderActionParams(args=[])) - - mock_output.rule.assert_called() - mock_output.print.assert_called() - mock_output.tip.assert_called() - - -@pytest.mark.asyncio -async def test_provider_action_async_list(mock_context): - """Test provider action with list command.""" - with patch( - "mcp_cli.commands.actions.providers.get_context", return_value=mock_context - ): - with patch( - "mcp_cli.commands.actions.providers._render_list_optimized" - ) as mock_list: - await provider_action_async(ProviderActionParams(args=["list"])) - - mock_list.assert_called_once_with(mock_context.model_manager) - - -@pytest.mark.asyncio -async def test_provider_action_async_diagnostic(mock_context): - """Test provider action with diagnostic command.""" - with patch( - "mcp_cli.commands.actions.providers.get_context", return_value=mock_context - ): - with patch( - "mcp_cli.commands.actions.providers._render_diagnostic_optimized" - ) as mock_diag: - await provider_action_async( - ProviderActionParams(args=["diagnostic", "test-provider"]) - ) - - mock_diag.assert_called_once_with( - mock_context.model_manager, "test-provider" - ) - - -@pytest.mark.asyncio -async def test_provider_action_async_switch(mock_context): - """Test provider action for switching providers.""" - with patch( - "mcp_cli.commands.actions.providers.get_context", return_value=mock_context - ): - with patch( - "mcp_cli.commands.actions.providers._switch_provider_enhanced" - ) as mock_switch: - await provider_action_async(ProviderActionParams(args=["openai", "gpt-4"])) - - mock_switch.assert_called_once_with( - mock_context.model_manager, "openai", 
"gpt-4", mock_context - ) - - -@pytest.mark.asyncio -async def test_provider_action_async_no_model_manager(): - """Test provider action when model manager is not available.""" - context = MagicMock() - context.model_manager = None - - with patch("mcp_cli.commands.actions.providers.get_context", return_value=context): - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - await provider_action_async(ProviderActionParams(args=[])) - - mock_output.error.assert_called_with("Model manager not available") - - -def test_provider_action_sync(): - """Test synchronous wrapper for provider action.""" - with patch("mcp_cli.utils.async_utils.run_blocking") as mock_run: - with patch( - "mcp_cli.commands.actions.providers.provider_action_async", - new_callable=AsyncMock, - ) as mock_async: - args = ["list"] - provider_action(args) - - # Verify async function was called with ProviderActionParams - mock_async.assert_called_once() - call_args = mock_async.call_args[0][0] - assert call_args.args == args - mock_run.assert_called_once() diff --git a/tests/commands/actions/test_providers_custom.py b/tests/commands/actions/test_providers_custom.py deleted file mode 100644 index c8e4d5e5..00000000 --- a/tests/commands/actions/test_providers_custom.py +++ /dev/null @@ -1,306 +0,0 @@ -"""Tests for custom provider commands in providers action.""" - -import os -from unittest.mock import MagicMock, patch - -import pytest - -from mcp_cli.commands.actions.providers import ( - _add_custom_provider, - _remove_custom_provider, - _list_custom_providers, - provider_action_async, -) -from mcp_cli.commands.models import ProviderActionParams - - -class TestCustomProviderCommands: - """Test custom provider command functions.""" - - @patch("mcp_cli.utils.preferences.get_preference_manager") - @patch("mcp_cli.commands.actions.providers.output") - def test_add_custom_provider_success(self, mock_output, mock_get_prefs): - """Test successfully adding a custom provider.""" - mock_prefs = 
MagicMock() - mock_prefs.is_custom_provider.return_value = False - mock_get_prefs.return_value = mock_prefs - - # Test without environment variable set - with patch.dict(os.environ, {}, clear=True): - _add_custom_provider( - name="testai", - api_base="https://api.test.com/v1", - models=["gpt-4", "gpt-3.5"], - ) - - # Verify add was called - mock_prefs.add_custom_provider.assert_called_once_with( - name="testai", - api_base="https://api.test.com/v1", - models=["gpt-4", "gpt-3.5"], - default_model="gpt-4", - ) - - # Verify success message - mock_output.success.assert_any_call("✅ Added provider 'testai'") - mock_output.info.assert_any_call(" API Base: https://api.test.com/v1") - mock_output.info.assert_any_call(" Models: gpt-4, gpt-3.5") - - # Verify warning about missing API key - mock_output.warning.assert_called() - mock_output.print.assert_any_call( - " [bold]export TESTAI_API_KEY=your-api-key[/bold]" - ) - - @patch("mcp_cli.utils.preferences.get_preference_manager") - @patch("mcp_cli.commands.actions.providers.output") - def test_add_custom_provider_with_api_key(self, mock_output, mock_get_prefs): - """Test adding a custom provider when API key is already set.""" - mock_prefs = MagicMock() - mock_prefs.is_custom_provider.return_value = False - mock_get_prefs.return_value = mock_prefs - - # Test with environment variable set - with patch.dict(os.environ, {"TESTAI_API_KEY": "test-key-123"}): - _add_custom_provider( - name="testai", api_base="https://api.test.com/v1", models=["gpt-4"] - ) - - # Verify success message with API key found - mock_output.success.assert_any_call(" API Key: ✅ Found in TESTAI_API_KEY") - - @patch("mcp_cli.utils.preferences.get_preference_manager") - @patch("mcp_cli.commands.actions.providers.output") - def test_add_custom_provider_already_exists(self, mock_output, mock_get_prefs): - """Test adding a provider that already exists.""" - mock_prefs = MagicMock() - mock_prefs.is_custom_provider.return_value = True - mock_get_prefs.return_value = 
mock_prefs - - _add_custom_provider(name="existing", api_base="https://api.test.com/v1") - - # Should not call add - mock_prefs.add_custom_provider.assert_not_called() - - # Should show error - mock_output.error.assert_called_with( - "Provider 'existing' already exists. Use 'update' to modify it." - ) - - @patch("mcp_cli.utils.preferences.get_preference_manager") - @patch("mcp_cli.commands.actions.providers.output") - def test_add_custom_provider_no_models(self, mock_output, mock_get_prefs): - """Test adding a provider without specifying models.""" - mock_prefs = MagicMock() - mock_prefs.is_custom_provider.return_value = False - mock_get_prefs.return_value = mock_prefs - - _add_custom_provider(name="nomodels", api_base="https://api.test.com/v1") - - # Should use default models - mock_prefs.add_custom_provider.assert_called_once_with( - name="nomodels", - api_base="https://api.test.com/v1", - models=["gpt-4", "gpt-3.5-turbo"], - default_model="gpt-4", - ) - - @patch("mcp_cli.utils.preferences.get_preference_manager") - @patch("mcp_cli.commands.actions.providers.output") - def test_remove_custom_provider_success(self, mock_output, mock_get_prefs): - """Test successfully removing a custom provider.""" - mock_prefs = MagicMock() - mock_prefs.is_custom_provider.return_value = True - mock_prefs.remove_custom_provider.return_value = True - mock_get_prefs.return_value = mock_prefs - - _remove_custom_provider("testai") - - # Verify remove was called - mock_prefs.remove_custom_provider.assert_called_once_with("testai") - - # Verify success message - mock_output.success.assert_called_with("✅ Removed provider 'testai'") - - @patch("mcp_cli.utils.preferences.get_preference_manager") - @patch("mcp_cli.commands.actions.providers.output") - def test_remove_custom_provider_not_custom(self, mock_output, mock_get_prefs): - """Test removing a provider that's not custom.""" - mock_prefs = MagicMock() - mock_prefs.is_custom_provider.return_value = False - mock_get_prefs.return_value = 
mock_prefs - - _remove_custom_provider("openai") - - # Should not call remove - mock_prefs.remove_custom_provider.assert_not_called() - - # Should show error - mock_output.error.assert_called_with( - "Provider 'openai' is not a custom provider or doesn't exist." - ) - - @patch("mcp_cli.utils.preferences.get_preference_manager") - @patch("mcp_cli.commands.actions.providers.output") - def test_remove_custom_provider_failed(self, mock_output, mock_get_prefs): - """Test failed removal of custom provider.""" - mock_prefs = MagicMock() - mock_prefs.is_custom_provider.return_value = True - mock_prefs.remove_custom_provider.return_value = False - mock_get_prefs.return_value = mock_prefs - - _remove_custom_provider("testai") - - # Should show error - mock_output.error.assert_called_with("Failed to remove provider 'testai'") - - @patch("mcp_cli.commands.actions.providers.format_table") - @patch("mcp_cli.utils.preferences.get_preference_manager") - @patch("mcp_cli.commands.actions.providers.output") - def test_list_custom_providers_empty( - self, mock_output, mock_get_prefs, mock_format - ): - """Test listing custom providers when none exist.""" - mock_prefs = MagicMock() - mock_prefs.get_custom_providers.return_value = {} - mock_get_prefs.return_value = mock_prefs - - _list_custom_providers() - - # Should show no providers message - mock_output.info.assert_called_with("No custom providers configured.") - mock_output.tip.assert_called_with( - "Add one with: /provider add [models...]" - ) - - # Should not create table - mock_format.assert_not_called() - - @patch("mcp_cli.commands.actions.providers.format_table") - @patch("mcp_cli.utils.preferences.get_preference_manager") - @patch("mcp_cli.commands.actions.providers.output") - def test_list_custom_providers_with_providers( - self, mock_output, mock_get_prefs, mock_format - ): - """Test listing custom providers when they exist.""" - mock_prefs = MagicMock() - mock_prefs.get_custom_providers.return_value = { - "provider1": { - 
"api_base": "https://api1.com/v1", - "models": ["model1", "model2"], - "default_model": "model1", - "env_var_name": None, - }, - "provider2": { - "api_base": "https://api2.com/v1", - "models": ["model3"], - "default_model": "model3", - "env_var_name": "CUSTOM_KEY", - }, - } - mock_get_prefs.return_value = mock_prefs - mock_format.return_value = "formatted_table" - - # Test with one API key set - with patch.dict(os.environ, {"PROVIDER1_API_KEY": "key1"}): - _list_custom_providers() - - # Should show rule - mock_output.rule.assert_called_with( - "[bold]🔧 Custom Providers[/bold]", style="primary" - ) - - # Should format table with correct data - mock_format.assert_called_once() - table_data = mock_format.call_args[0][0] - assert len(table_data) == 2 - - # Check first provider - assert table_data[0]["Provider"] == "provider1" - assert table_data[0]["API Base"] == "https://api1.com/v1" - assert table_data[0]["Models"] == "model1, model2" - assert table_data[0]["Default"] == "model1" - assert "✅" in table_data[0]["Token"] # Has key - assert "PROVIDER1_API_KEY" in table_data[0]["Token"] - - # Check second provider - assert table_data[1]["Provider"] == "provider2" - assert "❌" in table_data[1]["Token"] # No key - assert "CUSTOM_KEY" in table_data[1]["Token"] # Custom env var - - @pytest.mark.asyncio - @patch("mcp_cli.commands.actions.providers.get_context") - @patch("mcp_cli.commands.actions.providers.ModelManager") - async def test_provider_action_add_command( - self, mock_model_manager, mock_get_context - ): - """Test provider action with add command.""" - mock_context = MagicMock() - mock_get_context.return_value = mock_context - - with patch( - "mcp_cli.commands.actions.providers._add_custom_provider" - ) as mock_add: - await provider_action_async( - ProviderActionParams( - args=["add", "testai", "https://api.test.com/v1", "gpt-4"] - ) - ) - - mock_add.assert_called_once_with( - "testai", "https://api.test.com/v1", ["gpt-4"] - ) - - @pytest.mark.asyncio - 
@patch("mcp_cli.commands.actions.providers.get_context") - @patch("mcp_cli.commands.actions.providers.ModelManager") - async def test_provider_action_remove_command( - self, mock_model_manager, mock_get_context - ): - """Test provider action with remove command.""" - mock_context = MagicMock() - mock_get_context.return_value = mock_context - - with patch( - "mcp_cli.commands.actions.providers._remove_custom_provider" - ) as mock_remove: - await provider_action_async(ProviderActionParams(args=["remove", "testai"])) - - mock_remove.assert_called_once_with("testai") - - @pytest.mark.asyncio - @patch("mcp_cli.commands.actions.providers.get_context") - @patch("mcp_cli.commands.actions.providers.ModelManager") - async def test_provider_action_custom_command( - self, mock_model_manager, mock_get_context - ): - """Test provider action with custom command.""" - mock_context = MagicMock() - mock_get_context.return_value = mock_context - - with patch( - "mcp_cli.commands.actions.providers._list_custom_providers" - ) as mock_list: - await provider_action_async(ProviderActionParams(args=["custom"])) - - mock_list.assert_called_once() - - @pytest.mark.asyncio - @patch("mcp_cli.commands.actions.providers._switch_provider_enhanced") - @patch("mcp_cli.commands.actions.providers.get_context") - @patch("mcp_cli.commands.actions.providers.ModelManager") - async def test_provider_action_add_missing_args( - self, mock_model_manager, mock_get_context, mock_switch - ): - """Test provider action with add command missing arguments.""" - mock_context = MagicMock() - mock_context.model_manager = mock_model_manager.return_value - mock_get_context.return_value = mock_context - - # Missing api_base - will fall through to provider switching - await provider_action_async(ProviderActionParams(args=["add", "testai"])) - - # Should attempt to switch to "add" as provider with "testai" as model - mock_switch.assert_called_once_with( - mock_model_manager.return_value, "add", "testai", mock_context - ) 
diff --git a/tests/commands/actions/test_providers_edge_cases.py b/tests/commands/actions/test_providers_edge_cases.py deleted file mode 100644 index ee0b49e1..00000000 --- a/tests/commands/actions/test_providers_edge_cases.py +++ /dev/null @@ -1,415 +0,0 @@ -"""Edge case tests to push providers.py coverage to 90%+.""" - -import pytest -from unittest.mock import patch, MagicMock -from mcp_cli.commands.actions.providers import ( - _render_list_optimized, - _render_diagnostic_optimized, - _switch_provider_enhanced, - provider_action_async, -) -from mcp_cli.commands.models import ProviderActionParams - - -# ========== Render List Error Paths ========== - - -def test_render_list_token_manager_exception(): - """Test render list when TokenManager throws exception.""" - mock_manager = MagicMock() - mock_manager.get_active_provider.return_value = "anthropic" - mock_manager.get_available_providers.return_value = ["anthropic"] - mock_manager.get_available_models.return_value = ["claude-3"] - mock_manager.get_default_model.return_value = "claude-3" - - with patch("mcp_cli.auth.TokenManager", side_effect=Exception("Token error")): - with patch("mcp_cli.commands.actions.providers.output"): - with patch("mcp_cli.commands.actions.providers.format_table"): - # Should not raise exception, token_manager should be None - _render_list_optimized(mock_manager) - - -def test_render_list_no_providers(): - """Test render list when no providers found.""" - mock_manager = MagicMock() - mock_manager.get_available_providers.return_value = [] - - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _render_list_optimized(mock_manager) - - mock_output.error.assert_called_with( - "No providers found. Check chuk-llm installation." 
- ) - - -def test_render_list_provider_get_models_exception(): - """Test render list when get_available_models throws exception for a provider.""" - mock_manager = MagicMock() - mock_manager.get_active_provider.return_value = "anthropic" - mock_manager.get_available_providers.return_value = ["anthropic", "error-provider"] - - def get_models_side_effect(provider): - if provider == "error-provider": - raise Exception("Model fetch error") - return ["claude-3"] - - mock_manager.get_available_models.side_effect = get_models_side_effect - mock_manager.get_default_model.return_value = "claude-3" - - with patch("mcp_cli.auth.TokenManager"): - with patch("mcp_cli.commands.actions.providers.output"): - with patch("mcp_cli.commands.actions.providers.format_table") as mock_table: - _render_list_optimized(mock_manager) - - # Should create table with error entry for error-provider - mock_table.assert_called_once() - table_data = mock_table.call_args[0][0] - # Check that error provider is in the table - error_entries = [ - row - for row in table_data - if row.get("Provider") - and "error" in row["Provider"].lower() - or "Error" in row.get("Status", "") - ] - assert len(error_entries) > 0 - - -def test_render_list_get_providers_exception(): - """Test render list when get_available_providers throws exception.""" - mock_manager = MagicMock() - mock_manager.get_available_providers.side_effect = Exception("Provider list error") - - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _render_list_optimized(mock_manager) - - mock_output.error.assert_called() - assert "Error getting provider list" in str(mock_output.error.call_args) - - -def test_render_list_with_inactive_providers(): - """Test render list with inactive providers showing hints.""" - mock_manager = MagicMock() - mock_manager.get_active_provider.return_value = "ollama" - mock_manager.get_available_providers.return_value = ["ollama", "anthropic"] - - # Ollama has models, anthropic doesn't - def 
get_models_side_effect(provider): - if provider == "ollama": - return ["llama2"] - return [] - - mock_manager.get_available_models.side_effect = get_models_side_effect - - def get_default_side_effect(provider): - if provider == "ollama": - return "llama2" - return None - - mock_manager.get_default_model.side_effect = get_default_side_effect - - with patch("mcp_cli.auth.TokenManager"): - with patch( - "mcp_cli.commands.actions.providers._get_provider_status_enhanced" - ) as mock_status: - # Ollama ready, anthropic not ready - def status_side_effect(name, data): - from mcp_cli.commands.models.provider import ProviderStatus - - if name == "ollama": - return ProviderStatus(icon="✅", text="Ready", reason="Running") - return ProviderStatus( - icon="❌", text="Not Configured", reason="No API key" - ) - - mock_status.side_effect = status_side_effect - - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - with patch("mcp_cli.commands.actions.providers.format_table"): - _render_list_optimized(mock_manager) - - # Should show hint for inactive provider - assert any( - "API key" in str(call) or "ANTHROPIC" in str(call) - for call in mock_output.hint.call_args_list - ) - - -def test_render_list_with_custom_providers_hint(): - """Test render list showing custom provider hint when none exist.""" - mock_manager = MagicMock() - mock_manager.get_active_provider.return_value = "anthropic" - mock_manager.get_available_providers.return_value = ["anthropic"] - mock_manager.get_available_models.return_value = ["claude-3"] - mock_manager.get_default_model.return_value = "claude-3" - - with patch("mcp_cli.auth.TokenManager"): - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - with patch("mcp_cli.commands.actions.providers.format_table"): - _render_list_optimized(mock_manager) - - # Should show hint about adding custom providers (lines 349-352) - assert any( - "custom" in str(call).lower() or "add" in str(call).lower() - for call in 
mock_output.hint.call_args_list - ) - - -# ========== Render Diagnostic Error Paths ========== - - -def test_render_diagnostic_provider_exception(): - """Test render diagnostic when processing a provider throws exception.""" - mock_manager = MagicMock() - mock_manager.validate_provider.return_value = True - mock_manager.get_available_providers.return_value = {"test": {}} - - with patch( - "mcp_cli.commands.actions.providers._dict_to_provider_data", - side_effect=Exception("Parse error"), - ): - with patch("mcp_cli.commands.actions.providers.output"): - with patch("mcp_cli.commands.actions.providers.format_table") as mock_table: - _render_diagnostic_optimized(mock_manager, "test") - - # Should create table with error entry - mock_table.assert_called_once() - - -def test_render_diagnostic_unknown_provider(): - """Test render diagnostic with unknown provider.""" - mock_manager = MagicMock() - mock_manager.validate_provider.return_value = False - mock_manager.get_available_providers.return_value = ["anthropic", "openai"] - - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _render_diagnostic_optimized(mock_manager, "unknown") - - mock_output.error.assert_called_with("Unknown provider: unknown") - mock_output.warning.assert_called() - - -# ========== Switch Provider Edge Cases ========== - - -def test_switch_provider_get_available_models_exception(): - """Test switch provider when get_available_models throws exception.""" - mock_manager = MagicMock() - mock_manager.validate_provider.return_value = True - mock_manager.get_available_providers.return_value = { - "anthropic": {"has_api_key": True, "models": []} - } - mock_manager.get_default_model.return_value = None - mock_manager.get_available_models.side_effect = Exception("Models error") - mock_context = MagicMock() - - with patch("mcp_cli.commands.actions.providers.output"): - _switch_provider_enhanced(mock_manager, "anthropic", None, mock_context) - - # Should fall back to "default" - 
mock_manager.switch_model.assert_called_with("anthropic", "default") - - -def test_switch_provider_partial_setup_warning(): - """Test switch provider with partial setup warning.""" - mock_manager = MagicMock() - mock_manager.validate_provider.return_value = True - mock_manager.get_available_providers.return_value = { - "anthropic": {"has_api_key": True, "models": []} - } - mock_manager.get_default_model.return_value = "claude-3" - mock_context = MagicMock() - - with patch( - "mcp_cli.commands.actions.providers._get_provider_status_enhanced" - ) as mock_status: - from mcp_cli.commands.models.provider import ProviderStatus - - mock_status.return_value = ProviderStatus( - icon="⚠️", - text="Partial Setup", - reason="API key set but no models", - ) - - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _switch_provider_enhanced(mock_manager, "anthropic", None, mock_context) - - # Should show warning but continue - mock_output.warning.assert_called() - # Check that "Continuing" or "Switching" message was shown - assert any( - "Continuing" in str(call) or "Switching" in str(call) - for call in mock_output.info.call_args_list - ) - mock_manager.switch_model.assert_called() - - -# ========== Provider Action Async Edge Cases ========== - - -@pytest.mark.asyncio -async def test_provider_action_no_model_manager(): - """Test provider action when model manager is not available.""" - mock_context = MagicMock() - mock_context.model_manager = None - - with patch( - "mcp_cli.commands.actions.providers.get_context", return_value=mock_context - ): - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - await provider_action_async(ProviderActionParams(args=["list"])) - - mock_output.error.assert_called_with("Model manager not available") - - -@pytest.mark.asyncio -async def test_provider_action_switch_with_model(): - """Test provider action switching with specific model.""" - mock_context = MagicMock() - mock_context.model_manager = 
MagicMock() - - with patch( - "mcp_cli.commands.actions.providers.get_context", return_value=mock_context - ): - with patch( - "mcp_cli.commands.actions.providers._switch_provider_enhanced" - ) as mock_switch: - await provider_action_async( - ProviderActionParams(args=["anthropic", "claude-3"]) - ) - - mock_switch.assert_called_once_with( - mock_context.model_manager, "anthropic", "claude-3", mock_context - ) - - -# ========== Dict to ProviderData Conversion ========== - - -def test_dict_to_provider_data_with_all_fields(): - """Test converting dict to ProviderData with all fields.""" - from mcp_cli.commands.actions.providers import _dict_to_provider_data - - data = { - "has_api_key": True, - "token_source": "env", - "models": ["model1", "model2"], - "available_models": ["model3"], - "default_model": "model1", - "baseline_features": ["streaming", "tools"], - "is_custom": True, - "api_base": "http://localhost", - "discovery_enabled": True, - "error": None, - } - - result = _dict_to_provider_data("test", data) - - assert result.name == "test" - assert result.has_api_key is True - assert result.models == ["model1", "model2"] - assert result.default_model == "model1" - - -def test_dict_to_provider_data_minimal(): - """Test converting minimal dict to ProviderData.""" - from mcp_cli.commands.actions.providers import _dict_to_provider_data - - data = {} - result = _dict_to_provider_data("test", data) - - assert result.name == "test" - assert result.has_api_key is False - assert result.models == [] - - -def test_dict_to_provider_data_with_provider_data(): - """Test that ProviderData is returned as-is.""" - from mcp_cli.commands.actions.providers import _dict_to_provider_data - from mcp_cli.commands.models.provider import ProviderData - - provider_data = ProviderData(name="test") - result = _dict_to_provider_data("test", provider_data) - - assert result is provider_data - - -# ========== Additional Edge Cases ========== - - -def test_render_list_inactive_custom_provider(): - 
"""Test render list with inactive custom provider.""" - mock_manager = MagicMock() - mock_manager.get_active_provider.return_value = "anthropic" - mock_manager.get_available_providers.return_value = ["anthropic", "custom"] - - def get_models_side_effect(provider): - if provider == "anthropic": - return ["claude-3"] - return [] # Custom provider not configured - - mock_manager.get_available_models.side_effect = get_models_side_effect - mock_manager.get_default_model.side_effect = ( - lambda p: "claude-3" if p == "anthropic" else None - ) - - with patch("mcp_cli.auth.TokenManager"): - with patch( - "mcp_cli.commands.actions.providers._get_provider_status_enhanced" - ) as mock_status: - from mcp_cli.commands.models.provider import ProviderStatus - - def status_side_effect(name, data): - # Mark custom as not ready and custom - if name == "custom": - # Simulate is_custom being True - data.is_custom = True - return ProviderStatus( - icon="❌", text="Not Ready", reason="No config" - ) - return ProviderStatus(icon="✅", text="Ready", reason="OK") - - mock_status.side_effect = status_side_effect - - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - with patch("mcp_cli.commands.actions.providers.format_table"): - _render_list_optimized(mock_manager) - - # Should show hints for inactive custom provider - assert mock_output.hint.called - - -def test_switch_provider_with_provider_error_key(): - """Test switch provider when provider has error key.""" - mock_manager = MagicMock() - mock_manager.validate_provider.return_value = True - mock_manager.get_available_providers.return_value = { - "test": {"error": "Provider is broken"} - } - mock_context = MagicMock() - - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _switch_provider_enhanced(mock_manager, "test", None, mock_context) - - mock_output.error.assert_called_with("Provider error: Provider is broken") - - -# ========== Stub Functions ========== - - -def 
test_render_config_stub(): - """Test _render_config stub function.""" - from mcp_cli.commands.actions.providers import _render_config - - mock_manager = MagicMock() - # Should not raise exception, just pass - _render_config(mock_manager) - - -def test_mutate_stub(): - """Test _mutate stub function.""" - from mcp_cli.commands.actions.providers import _mutate - - mock_manager = MagicMock() - # Should not raise exception, just pass - _mutate(mock_manager, "anthropic", "api_key", "test") diff --git a/tests/commands/actions/test_providers_extended.py b/tests/commands/actions/test_providers_extended.py deleted file mode 100644 index d55568d7..00000000 --- a/tests/commands/actions/test_providers_extended.py +++ /dev/null @@ -1,539 +0,0 @@ -"""Extended tests for providers.py to reach 90%+ coverage.""" - -import pytest -from unittest.mock import patch, MagicMock -from mcp_cli.commands.actions.providers import ( - provider_action_async, - _add_custom_provider, - _remove_custom_provider, - _list_custom_providers, - _switch_provider_enhanced, -) -from mcp_cli.commands.models import ProviderActionParams - - -@pytest.fixture -def mock_context(): - """Create a mock application context.""" - context = MagicMock() - context.model_manager = MagicMock() - context.provider = "anthropic" - context.model = "claude-3" - return context - - -# ========== Custom Provider Tests ========== - - -def test_add_custom_provider_success(): - """Test adding a custom provider successfully.""" - mock_prefs = MagicMock() - mock_prefs.is_custom_provider.return_value = False - - with patch( - "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_prefs - ): - with patch("os.environ.get", return_value=None): - with patch("mcp_cli.commands.actions.providers.output"): - _add_custom_provider("localai", "http://localhost:8080/v1", ["gpt-4"]) - - mock_prefs.add_custom_provider.assert_called_once_with( - name="localai", - api_base="http://localhost:8080/v1", - models=["gpt-4"], - 
default_model="gpt-4", - ) - - -def test_add_custom_provider_already_exists(): - """Test adding a custom provider that already exists.""" - mock_prefs = MagicMock() - mock_prefs.is_custom_provider.return_value = True - - with patch( - "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_prefs - ): - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _add_custom_provider("localai", "http://localhost:8080/v1") - - mock_output.error.assert_called() - mock_prefs.add_custom_provider.assert_not_called() - - -def test_add_custom_provider_with_api_key(): - """Test adding provider when API key is already set.""" - mock_prefs = MagicMock() - mock_prefs.is_custom_provider.return_value = False - - with patch( - "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_prefs - ): - with patch("os.environ.get", return_value="test-key"): - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _add_custom_provider("localai", "http://localhost:8080/v1") - - # Should show success message about API key being found - assert any( - "API Key" in str(call) - for call in mock_output.success.call_args_list - ) - - -def test_add_custom_provider_no_models(): - """Test adding provider without specifying models.""" - mock_prefs = MagicMock() - mock_prefs.is_custom_provider.return_value = False - - with patch( - "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_prefs - ): - with patch("os.environ.get", return_value=None): - with patch("mcp_cli.commands.actions.providers.output"): - _add_custom_provider("localai", "http://localhost:8080/v1", None) - - # Should use default models - mock_prefs.add_custom_provider.assert_called_once() - call_args = mock_prefs.add_custom_provider.call_args - assert call_args[1]["models"] == ["gpt-4", "gpt-3.5-turbo"] - - -def test_remove_custom_provider_success(): - """Test removing a custom provider successfully.""" - mock_prefs = MagicMock() - 
mock_prefs.is_custom_provider.return_value = True - mock_prefs.remove_custom_provider.return_value = True - - with patch( - "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_prefs - ): - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _remove_custom_provider("localai") - - mock_prefs.remove_custom_provider.assert_called_once_with("localai") - mock_output.success.assert_called() - - -def test_remove_custom_provider_not_custom(): - """Test removing a provider that's not custom.""" - mock_prefs = MagicMock() - mock_prefs.is_custom_provider.return_value = False - - with patch( - "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_prefs - ): - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _remove_custom_provider("openai") - - mock_output.error.assert_called() - mock_prefs.remove_custom_provider.assert_not_called() - - -def test_remove_custom_provider_failure(): - """Test removing a custom provider when removal fails.""" - mock_prefs = MagicMock() - mock_prefs.is_custom_provider.return_value = True - mock_prefs.remove_custom_provider.return_value = False - - with patch( - "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_prefs - ): - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _remove_custom_provider("localai") - - mock_output.error.assert_called_with("Failed to remove provider 'localai'") - - -def test_list_custom_providers_empty(): - """Test listing custom providers when none exist.""" - mock_prefs = MagicMock() - mock_prefs.get_custom_providers.return_value = {} - - with patch( - "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_prefs - ): - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _list_custom_providers() - - mock_output.info.assert_called_with("No custom providers configured.") - mock_output.tip.assert_called() - - -def test_list_custom_providers_with_providers(): - """Test 
listing custom providers with providers configured.""" - mock_prefs = MagicMock() - mock_prefs.get_custom_providers.return_value = { - "localai": { - "api_base": "http://localhost:8080/v1", - "models": ["gpt-4"], - "default_model": "gpt-4", - } - } - - mock_token_manager = MagicMock() - - with patch( - "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_prefs - ): - with patch("mcp_cli.auth.TokenManager", return_value=mock_token_manager): - with patch( - "mcp_cli.auth.provider_tokens.get_provider_token_display_status", - return_value="✅ env", - ): - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - with patch( - "mcp_cli.commands.actions.providers.format_table" - ) as mock_table: - _list_custom_providers() - - mock_table.assert_called_once() - mock_output.print_table.assert_called_once() - - -def test_list_custom_providers_token_manager_exception(): - """Test listing custom providers when TokenManager fails.""" - mock_prefs = MagicMock() - mock_prefs.get_custom_providers.return_value = { - "localai": { - "api_base": "http://localhost:8080/v1", - "models": ["gpt-4"], - "default_model": "gpt-4", - } - } - - with patch( - "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_prefs - ): - with patch("mcp_cli.auth.TokenManager", side_effect=Exception("Token error")): - with patch( - "mcp_cli.auth.provider_tokens.get_provider_token_display_status", - return_value="❌ none", - ): - with patch("mcp_cli.commands.actions.providers.output"): - with patch("mcp_cli.commands.actions.providers.format_table"): - # Should not raise exception - _list_custom_providers() - - -def test_list_custom_providers_with_custom_env_var(): - """Test listing provider with custom env var name.""" - mock_prefs = MagicMock() - mock_prefs.get_custom_providers.return_value = { - "localai": { - "api_base": "http://localhost:8080/v1", - "models": ["gpt-4"], - "default_model": "gpt-4", - "env_var_name": "MY_CUSTOM_KEY", - } - } - - with patch( - 
"mcp_cli.utils.preferences.get_preference_manager", return_value=mock_prefs - ): - with patch("mcp_cli.auth.TokenManager", return_value=MagicMock()): - with patch("os.environ.get", return_value="test-key"): - with patch("mcp_cli.commands.actions.providers.output"): - with patch( - "mcp_cli.commands.actions.providers.format_table" - ) as mock_table: - _list_custom_providers() - - # Check that table was created with custom env var status - mock_table.assert_called_once() - table_data = mock_table.call_args[0][0] - assert "MY_CUSTOM_KEY" in str(table_data) - - -def test_list_custom_providers_custom_env_var_not_set(): - """Test listing provider with custom env var that's not set.""" - mock_prefs = MagicMock() - mock_prefs.get_custom_providers.return_value = { - "localai": { - "api_base": "http://localhost:8080/v1", - "models": ["gpt-4"], - "default_model": "gpt-4", - "env_var_name": "MY_CUSTOM_KEY", - } - } - - with patch( - "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_prefs - ): - with patch("mcp_cli.auth.TokenManager", return_value=MagicMock()): - with patch("os.environ.get", return_value=None): - with patch("mcp_cli.commands.actions.providers.output"): - with patch( - "mcp_cli.commands.actions.providers.format_table" - ) as mock_table: - _list_custom_providers() - - table_data = mock_table.call_args[0][0] - assert "not set" in str(table_data) - - -# ========== Provider Action Async Tests ========== - - -@pytest.mark.asyncio -async def test_provider_action_custom_command(mock_context): - """Test provider action with custom command.""" - with patch( - "mcp_cli.commands.actions.providers.get_context", return_value=mock_context - ): - with patch( - "mcp_cli.commands.actions.providers._list_custom_providers" - ) as mock_list: - await provider_action_async(ProviderActionParams(args=["custom"])) - - mock_list.assert_called_once() - - -@pytest.mark.asyncio -async def test_provider_action_add_command(mock_context): - """Test provider action with add 
command.""" - with patch( - "mcp_cli.commands.actions.providers.get_context", return_value=mock_context - ): - with patch( - "mcp_cli.commands.actions.providers._add_custom_provider" - ) as mock_add: - await provider_action_async( - ProviderActionParams( - args=["add", "localai", "http://localhost:8080/v1", "gpt-4"] - ) - ) - - mock_add.assert_called_once_with( - "localai", "http://localhost:8080/v1", ["gpt-4"] - ) - - -@pytest.mark.asyncio -async def test_provider_action_add_command_no_models(mock_context): - """Test provider action add without models.""" - with patch( - "mcp_cli.commands.actions.providers.get_context", return_value=mock_context - ): - with patch( - "mcp_cli.commands.actions.providers._add_custom_provider" - ) as mock_add: - await provider_action_async( - ProviderActionParams( - args=["add", "localai", "http://localhost:8080/v1"] - ) - ) - - mock_add.assert_called_once_with( - "localai", "http://localhost:8080/v1", None - ) - - -@pytest.mark.asyncio -async def test_provider_action_remove_command(mock_context): - """Test provider action with remove command.""" - with patch( - "mcp_cli.commands.actions.providers.get_context", return_value=mock_context - ): - with patch( - "mcp_cli.commands.actions.providers._remove_custom_provider" - ) as mock_remove: - await provider_action_async( - ProviderActionParams(args=["remove", "localai"]) - ) - - mock_remove.assert_called_once_with("localai") - - -@pytest.mark.asyncio -async def test_provider_action_config_command(mock_context): - """Test provider action with config command.""" - with patch( - "mcp_cli.commands.actions.providers.get_context", return_value=mock_context - ): - with patch("mcp_cli.commands.actions.providers._render_config") as mock_config: - await provider_action_async(ProviderActionParams(args=["config"])) - - mock_config.assert_called_once() - - -@pytest.mark.asyncio -async def test_provider_action_diagnostic_with_target(mock_context): - """Test provider action diagnostic with target.""" - 
with patch( - "mcp_cli.commands.actions.providers.get_context", return_value=mock_context - ): - with patch( - "mcp_cli.commands.actions.providers._render_diagnostic_optimized" - ) as mock_diag: - await provider_action_async( - ProviderActionParams(args=["diagnostic", "anthropic"]) - ) - - mock_diag.assert_called_once_with(mock_context.model_manager, "anthropic") - - -@pytest.mark.asyncio -async def test_provider_action_set_command(mock_context): - """Test provider action with set command.""" - with patch( - "mcp_cli.commands.actions.providers.get_context", return_value=mock_context - ): - with patch("mcp_cli.commands.actions.providers._mutate") as mock_mutate: - await provider_action_async( - ProviderActionParams(args=["set", "anthropic", "api_key", "test-key"]) - ) - - mock_mutate.assert_called_once_with( - mock_context.model_manager, "anthropic", "api_key", "test-key" - ) - - -@pytest.mark.asyncio -async def test_provider_action_set_command_no_value(mock_context): - """Test provider action set without value.""" - with patch( - "mcp_cli.commands.actions.providers.get_context", return_value=mock_context - ): - with patch("mcp_cli.commands.actions.providers._mutate") as mock_mutate: - await provider_action_async( - ProviderActionParams(args=["set", "anthropic", "api_key"]) - ) - - mock_mutate.assert_called_once_with( - mock_context.model_manager, "anthropic", "api_key", None - ) - - -@pytest.mark.asyncio -async def test_provider_action_status_exception(mock_context): - """Test provider action status display with exception.""" - # Make the status check fail but not get_active_provider/model - mock_context.model_manager.get_active_provider.return_value = "anthropic" - mock_context.model_manager.get_active_model.return_value = "claude-3" - - with patch( - "mcp_cli.commands.actions.providers.get_context", return_value=mock_context - ): - with patch( - "mcp_cli.commands.actions.providers.ProviderData", - side_effect=Exception("Status error"), - ): - with 
patch("mcp_cli.commands.actions.providers.output") as mock_output: - await provider_action_async(ProviderActionParams(args=[])) - - # Should fall back to simple display - mock_output.info.assert_called() - mock_output.warning.assert_called() - - -# ========== Switch Provider Edge Cases ========== - - -def test_switch_provider_model_specified(): - """Test switch provider with specific model.""" - mock_manager = MagicMock() - mock_manager.validate_provider.return_value = True - mock_manager.get_available_providers.return_value = { - "anthropic": {"has_api_key": True, "models": ["claude-3", "claude-2"]} - } - mock_context = MagicMock() - - with patch("mcp_cli.commands.actions.providers.output"): - _switch_provider_enhanced(mock_manager, "anthropic", "claude-2", mock_context) - - # Should switch to specified model - mock_manager.switch_model.assert_called_with("anthropic", "claude-2") - - -def test_switch_provider_get_default_model_exception(): - """Test switch provider when get_default_model throws exception.""" - mock_manager = MagicMock() - mock_manager.validate_provider.return_value = True - mock_manager.get_available_providers.return_value = { - "anthropic": {"has_api_key": True, "models": ["claude-3"]} - } - mock_manager.get_default_model.side_effect = Exception("Model error") - mock_manager.get_available_models.return_value = [] - mock_context = MagicMock() - - with patch("mcp_cli.commands.actions.providers.output"): - _switch_provider_enhanced(mock_manager, "anthropic", None, mock_context) - - # Should use "default" as fallback - mock_manager.switch_model.assert_called_with("anthropic", "default") - - -def test_switch_provider_no_default_fallback_to_first(): - """Test switch provider falling back to first available model.""" - mock_manager = MagicMock() - mock_manager.validate_provider.return_value = True - mock_manager.get_available_providers.return_value = { - "anthropic": {"has_api_key": True, "models": ["claude-3", "claude-2"]} - } - 
mock_manager.get_default_model.return_value = None - mock_manager.get_available_models.return_value = ["claude-3", "claude-2"] - mock_context = MagicMock() - - with patch("mcp_cli.commands.actions.providers.output"): - _switch_provider_enhanced(mock_manager, "anthropic", None, mock_context) - - # Should use first available model - mock_manager.switch_model.assert_called_with("anthropic", "claude-3") - - -def test_switch_provider_switch_model_exception(): - """Test switch provider when switch_model fails.""" - mock_manager = MagicMock() - mock_manager.validate_provider.return_value = True - mock_manager.get_available_providers.return_value = { - "anthropic": {"has_api_key": True, "models": ["claude-3"]} - } - mock_manager.get_default_model.return_value = "claude-3" - mock_manager.switch_model.side_effect = Exception("Switch failed") - mock_context = MagicMock() - - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _switch_provider_enhanced(mock_manager, "anthropic", None, mock_context) - - mock_output.error.assert_called_with("Failed to switch provider: Switch failed") - - -def test_switch_provider_context_update_exception(): - """Test switch provider when context update fails.""" - mock_manager = MagicMock() - mock_manager.validate_provider.return_value = True - mock_manager.get_available_providers.return_value = { - "anthropic": {"has_api_key": True, "models": ["claude-3"]} - } - mock_manager.get_default_model.return_value = "claude-3" - - # Create context that throws on property assignment - mock_context = MagicMock() - type(mock_context).provider = property( - lambda self: "old", - lambda self, v: (_ for _ in ()).throw(Exception("Context error")), - ) - - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _switch_provider_enhanced(mock_manager, "anthropic", None, mock_context) - - # Should show warning but still succeed - assert any( - "Context" in str(call) for call in mock_output.warning.call_args_list - ) - 
mock_output.success.assert_called() - - -def test_switch_provider_validation_exception(): - """Test switch provider when validation throws exception.""" - mock_manager = MagicMock() - mock_manager.validate_provider.return_value = True - mock_manager.get_available_providers.side_effect = Exception("Validation error") - mock_manager.get_default_model.return_value = "claude-3" - mock_context = MagicMock() - - with patch("mcp_cli.commands.actions.providers.output") as mock_output: - _switch_provider_enhanced(mock_manager, "anthropic", None, mock_context) - - # Should show warning but continue - mock_output.warning.assert_called() - mock_manager.switch_model.assert_called() diff --git a/tests/commands/actions/test_resources_action.py b/tests/commands/actions/test_resources_action.py deleted file mode 100644 index c3216185..00000000 --- a/tests/commands/actions/test_resources_action.py +++ /dev/null @@ -1,170 +0,0 @@ -"""Tests for resources action.""" - -from unittest.mock import MagicMock, patch -import pytest - -from mcp_cli.commands.actions.resources import ( - resources_action_async, - resources_action, - _human_size, -) -from mcp_cli.commands.models import ResourceInfoResponse - - -def test_human_size(): - """Test the _human_size helper function.""" - assert _human_size(None) == "-" - assert _human_size(-1) == "-" - assert _human_size(0) == "0 B" - assert _human_size(500) == "500 B" - assert _human_size(1024) == "1 KB" - assert _human_size(1536) == "2 KB" # 1.5 KB rounded up - assert _human_size(1024 * 1024) == "1 MB" - assert _human_size(1024 * 1024 * 1024) == "1 GB" - assert _human_size(1024 * 1024 * 1024 * 1024) == "1.0 TB" - - -@pytest.fixture -def mock_context(): - """Create a mock context with tool manager.""" - context = MagicMock() - tool_manager = MagicMock() - context.tool_manager = tool_manager - return context, tool_manager - - -@pytest.fixture -def sample_resources(): - """Create sample resource data.""" - return [ - { - "server": "test_server", - 
"uri": "file:///test.txt", - "size": 1024, - "mimeType": "text/plain", - }, - { - "server": "another_server", - "uri": "file:///data.json", - "size": 2048, - "mimeType": "application/json", - }, - ] - - -@pytest.mark.asyncio -async def test_resources_action_async_basic(mock_context, sample_resources): - """Test basic resources action async.""" - context, tool_manager = mock_context - tool_manager.list_resources.return_value = sample_resources - - with ( - patch("mcp_cli.commands.actions.resources.get_context", return_value=context), - patch("mcp_cli.commands.actions.resources.output") as mock_output, - patch("mcp_cli.commands.actions.resources.format_table") as mock_format_table, - ): - mock_table = MagicMock() - mock_format_table.return_value = mock_table - - result = await resources_action_async() - - # Verify table creation and output - mock_format_table.assert_called_once() - table_data = mock_format_table.call_args[0][0] - assert len(table_data) == 2 - assert table_data[0]["Size"] == "1 KB" - assert table_data[1]["Size"] == "2 KB" - - mock_output.print_table.assert_called_once_with(mock_table) - - # Verify result - should be Pydantic models - assert len(result) == len(sample_resources) - assert all(isinstance(r, ResourceInfoResponse) for r in result) - assert result[0].uri == "file:///test.txt" - assert result[0].server == "test_server" - assert result[1].uri == "file:///data.json" - assert result[1].server == "another_server" - - -@pytest.mark.asyncio -async def test_resources_action_async_no_tool_manager(): - """Test resources action when no tool manager available.""" - context = MagicMock() - context.tool_manager = None - - with ( - patch("mcp_cli.commands.actions.resources.get_context", return_value=context), - patch("mcp_cli.commands.actions.resources.output") as mock_output, - ): - result = await resources_action_async() - - mock_output.error.assert_called_once_with("No tool manager available") - assert result == [] - - -@pytest.mark.asyncio -async def 
test_resources_action_async_no_resources(mock_context): - """Test resources action when no resources available.""" - context, tool_manager = mock_context - tool_manager.list_resources.return_value = [] - - with ( - patch("mcp_cli.commands.actions.resources.get_context", return_value=context), - patch("mcp_cli.commands.actions.resources.output") as mock_output, - ): - result = await resources_action_async() - - mock_output.info.assert_called_once_with("No resources recorded.") - assert result == [] - - -@pytest.mark.asyncio -async def test_resources_action_async_exception(mock_context): - """Test resources action when exception occurs.""" - context, tool_manager = mock_context - tool_manager.list_resources.side_effect = Exception("Test error") - - with ( - patch("mcp_cli.commands.actions.resources.get_context", return_value=context), - patch("mcp_cli.commands.actions.resources.output") as mock_output, - ): - result = await resources_action_async() - - mock_output.error.assert_called_once_with("Test error") - assert result == [] - - -@pytest.mark.asyncio -async def test_resources_action_async_awaitable_result(mock_context, sample_resources): - """Test resources action when list_resources returns an awaitable.""" - context, tool_manager = mock_context - - # Make list_resources return a coroutine - async def async_list_resources(): - return sample_resources - - tool_manager.list_resources.return_value = async_list_resources() - - with ( - patch("mcp_cli.commands.actions.resources.get_context", return_value=context), - patch("mcp_cli.commands.actions.resources.output"), - patch("mcp_cli.commands.actions.resources.format_table") as mock_format_table, - ): - mock_format_table.return_value = MagicMock() - - result = await resources_action_async() - - # Verify result - should be Pydantic models - assert len(result) == len(sample_resources) - assert all(isinstance(r, ResourceInfoResponse) for r in result) - - -def test_resources_action_sync_wrapper(): - """Test the sync 
wrapper function.""" - with patch("mcp_cli.commands.actions.resources.run_blocking") as mock_run_blocking: - mock_run_blocking.return_value = [] - - result = resources_action() - - mock_run_blocking.assert_called_once() - assert result == [] diff --git a/tests/commands/actions/test_servers_action.py b/tests/commands/actions/test_servers_action.py deleted file mode 100644 index b3d32307..00000000 --- a/tests/commands/actions/test_servers_action.py +++ /dev/null @@ -1,1515 +0,0 @@ -"""Comprehensive tests for servers action.""" - -import pytest -from unittest.mock import AsyncMock, MagicMock, Mock, patch - -from mcp_cli.commands.actions.servers import ( - _get_server_icon, - _format_capabilities, - _format_performance, - _get_server_status, - _list_servers, - _add_server, - _remove_server, - _enable_disable_server, - _show_server_details, - servers_action_async, - servers_action, -) -from mcp_cli.config.config_manager import ServerConfig -from mcp_cli.commands.models import ServerActionParams - - -class TestHelperFunctions: - """Test helper functions.""" - - def test_get_server_icon(self): - """Test _get_server_icon function.""" - # Full-featured server (resources + prompts) - capabilities = {"resources": True, "prompts": True, "tools": True} - assert _get_server_icon(capabilities, 5) == "🎯" - - # Resources only - capabilities = {"resources": True} - assert _get_server_icon(capabilities, 0) == "📁" - - # Prompts only - capabilities = {"prompts": True} - assert _get_server_icon(capabilities, 0) == "💬" - - # Tool-heavy server (>15 tools) - assert _get_server_icon({}, 20) == "🔧" - - # Basic tool server (1-15 tools) - assert _get_server_icon({}, 5) == "⚙️" - - # Minimal server (no tools) - assert _get_server_icon({}, 0) == "📦" - - def test_format_performance(self): - """Test _format_performance function.""" - # Excellent performance (<10ms) - icon, text = _format_performance(5.5) - assert icon == "🚀" - assert text == "5.5ms" - - # Good performance (<50ms) - icon, text = 
_format_performance(25.0) - assert icon == "✅" - assert text == "25.0ms" - - # OK performance (<100ms) - icon, text = _format_performance(75.0) - assert icon == "⚠️" - assert text == "75.0ms" - - # Poor performance (>=100ms) - icon, text = _format_performance(150.0) - assert icon == "🔴" - assert text == "150.0ms" - - # Unknown performance - icon, text = _format_performance(None) - assert icon == "❓" - assert text == "Unknown" - - def test_format_capabilities(self): - """Test _format_capabilities function.""" - # Multiple capabilities - capabilities = {"tools": True, "resources": True, "prompts": True} - result = _format_capabilities(capabilities) - assert "Tools" in result - assert "Resources" in result - assert "Prompts" in result - - # Single capability - capabilities = {"tools": True} - assert _format_capabilities(capabilities) == "Tools" - - # Experimental capabilities - capabilities = {"experimental": {"events": True, "streaming": True}} - result = _format_capabilities(capabilities) - assert "Events*" in result - assert "Streaming*" in result - - # No capabilities - assert _format_capabilities({}) == "None" - - def test_get_server_status_with_dict(self): - """Test _get_server_status with dictionary.""" - # Disabled server - server_config = {"disabled": True} - icon, text, reason = _get_server_status(server_config) - assert icon == "⏸️" - assert text == "Disabled" - assert reason == "Server is disabled" - - # Connected server - server_config = {"command": "test"} - icon, text, reason = _get_server_status(server_config, connected=True) - assert icon == "✅" - assert text == "Connected" - assert reason == "Server is active" - - # Not configured - server_config = {} - icon, text, reason = _get_server_status(server_config) - assert icon == "❌" - assert text == "Not Configured" - assert reason == "No command or URL specified" - - # HTTP server - server_config = {"url": "http://example.com", "transport": "http"} - icon, text, reason = _get_server_status(server_config) 
- assert icon == "🌐" - assert text == "HTTP" - assert reason == "URL: http://example.com" - - # STDIO server - server_config = {"command": "test-command"} - icon, text, reason = _get_server_status(server_config) - assert icon == "📡" - assert text == "STDIO" - assert reason == "Command: test-command" - - def test_get_server_status_with_server_config(self): - """Test _get_server_status with ServerConfig object.""" - # Disabled server - server_config = ServerConfig(name="test", disabled=True) - icon, text, reason = _get_server_status(server_config) - assert icon == "⏸️" - assert text == "Disabled" - assert reason == "Server is disabled" - - # Connected server - server_config = ServerConfig(name="test", command="test-cmd") - icon, text, reason = _get_server_status(server_config, connected=True) - assert icon == "✅" - assert text == "Connected" - assert reason == "Server is active" - - # HTTP server - server_config = ServerConfig(name="test", url="http://example.com") - icon, text, reason = _get_server_status(server_config) - assert icon == "🌐" - assert text == "HTTP" - assert "URL:" in reason - - # STDIO server - server_config = ServerConfig(name="test", command="test-command") - icon, text, reason = _get_server_status(server_config) - assert icon == "📡" - assert text == "STDIO" - assert "Command:" in reason - - -class TestListServers: - """Test _list_servers function.""" - - @pytest.mark.asyncio - async def test_list_servers_basic(self): - """Test basic server listing.""" - with patch("mcp_cli.commands.actions.servers.get_context") as mock_context: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_mgr: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref_mgr: - with patch( - "mcp_cli.commands.actions.servers.output" - ) as mock_output: - # Setup mocks - mock_ctx = MagicMock() - mock_ctx.tool_manager = None - mock_context.return_value = mock_ctx - - # Mock config - mock_config = MagicMock() - 
test_server = ServerConfig(name="test", command="test-cmd") - mock_config.servers = {"test": test_server} - mock_config_mgr.return_value.get_config.return_value = ( - mock_config - ) - - # Mock preferences - mock_prefs = MagicMock() - mock_prefs.get_runtime_servers.return_value = {} - mock_prefs.is_server_disabled.return_value = False - mock_pref_mgr.return_value = mock_prefs - - # Call function - await _list_servers() - - # Verify output was called - assert mock_output.rule.called - assert mock_output.print_table.called - - @pytest.mark.asyncio - async def test_list_servers_with_runtime(self): - """Test listing with runtime servers.""" - with patch("mcp_cli.commands.actions.servers.get_context") as mock_context: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_mgr: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref_mgr: - with patch("mcp_cli.commands.actions.servers.output"): - # Setup mocks - mock_ctx = MagicMock() - mock_ctx.tool_manager = None - mock_context.return_value = mock_ctx - - # Mock config - mock_config = MagicMock() - mock_config.servers = {} - mock_config_mgr.return_value.get_config.return_value = ( - mock_config - ) - - # Mock preferences with runtime server - mock_prefs = MagicMock() - mock_prefs.get_runtime_servers.return_value = { - "runtime-test": { - "command": "runtime-cmd", - "transport": "stdio", - } - } - mock_prefs.is_server_disabled.return_value = False - mock_pref_mgr.return_value = mock_prefs - - # Call function - await _list_servers(show_all=True) - - # Verify runtime servers were included - mock_prefs.get_runtime_servers.assert_called_once() - - -class TestAddServer: - """Test _add_server function.""" - - @pytest.mark.asyncio - async def test_add_stdio_server(self): - """Test adding a STDIO server.""" - with patch("mcp_cli.commands.actions.servers.ConfigManager") as mock_config_mgr: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) 
as mock_pref_mgr: - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - # Mock config check - mock_config = MagicMock() - mock_config.servers = {} - mock_config_mgr.return_value.get_config.return_value = mock_config - - # Mock preferences - mock_prefs = MagicMock() - mock_prefs.get_runtime_server.return_value = None - mock_pref_mgr.return_value = mock_prefs - - # Call function - await _add_server( - name="test-server", - transport="stdio", - config_args=["test-command", "arg1", "arg2"], - env_vars={"TEST": "value"}, - ) - - # Verify server was added - mock_prefs.add_runtime_server.assert_called_once() - call_args = mock_prefs.add_runtime_server.call_args[0] - assert call_args[0] == "test-server" - server_config = call_args[1] - assert server_config["command"] == "test-command" - assert server_config["args"] == ["arg1", "arg2"] - assert server_config["env"] == {"TEST": "value"} - - # Verify success message - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_add_http_server(self): - """Test adding an HTTP server.""" - with patch("mcp_cli.commands.actions.servers.ConfigManager") as mock_config_mgr: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref_mgr: - with patch("mcp_cli.commands.actions.servers.output"): - # Mock config check - mock_config = MagicMock() - mock_config.servers = {} - mock_config_mgr.return_value.get_config.return_value = mock_config - - # Mock preferences - mock_prefs = MagicMock() - mock_prefs.get_runtime_server.return_value = None - mock_pref_mgr.return_value = mock_prefs - - # Call function - await _add_server( - name="api-server", - transport="http", - config_args=["https://api.example.com"], - headers={"Authorization": "Bearer token"}, - ) - - # Verify server was added - mock_prefs.add_runtime_server.assert_called_once() - call_args = mock_prefs.add_runtime_server.call_args[0] - assert call_args[0] == "api-server" - server_config = call_args[1] - assert 
server_config["url"] == "https://api.example.com" - assert server_config["transport"] == "http" - assert server_config["headers"] == {"Authorization": "Bearer token"} - - @pytest.mark.asyncio - async def test_add_duplicate_server(self): - """Test adding a duplicate server.""" - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref_mgr: - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - # Mock preferences - server already exists - mock_prefs = MagicMock() - mock_prefs.get_runtime_server.return_value = {"command": "existing"} - mock_pref_mgr.return_value = mock_prefs - - # Call function - await _add_server( - name="existing-server", - transport="stdio", - config_args=["test-command"], - ) - - # Verify error message - mock_output.error.assert_called() - assert "already exists" in str(mock_output.error.call_args) - - # Verify server was NOT added - mock_prefs.add_runtime_server.assert_not_called() - - @pytest.mark.asyncio - async def test_add_sse_server(self): - """Test adding an SSE server.""" - with patch("mcp_cli.commands.actions.servers.ConfigManager") as mock_config_mgr: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref_mgr: - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - # Mock config check - mock_config = MagicMock() - mock_config.servers = {} - mock_config_mgr.return_value.get_config.return_value = mock_config - - # Mock preferences - mock_prefs = MagicMock() - mock_prefs.get_runtime_server.return_value = None - mock_pref_mgr.return_value = mock_prefs - - # Call function - await _add_server( - name="sse-server", - transport="sse", - config_args=["https://sse.example.com/events"], - ) - - # Verify server was added - mock_prefs.add_runtime_server.assert_called_once() - call_args = mock_prefs.add_runtime_server.call_args[0] - assert call_args[0] == "sse-server" - server_config = call_args[1] - assert server_config["url"] == 
"https://sse.example.com/events" - assert server_config["transport"] == "sse" - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_add_stdio_server_missing_command(self): - """Test adding STDIO server without command.""" - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - await _add_server( - name="test-server", - transport="stdio", - config_args=[], # No command - ) - - # Should show error - mock_output.error.assert_called_with("STDIO server requires a command") - - @pytest.mark.asyncio - async def test_add_http_server_missing_url(self): - """Test adding HTTP server without URL.""" - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - await _add_server( - name="test-server", - transport="http", - config_args=[], # No URL - ) - - # Should show error - mock_output.error.assert_called_with("HTTP server requires a URL") - - @pytest.mark.asyncio - async def test_add_invalid_transport(self): - """Test adding server with invalid transport.""" - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - await _add_server( - name="test-server", transport="invalid", config_args=["command"] - ) - - # Should show error - mock_output.error.assert_called_with("Unknown transport type: invalid") - - -class TestRemoveServer: - """Test _remove_server function.""" - - @pytest.mark.asyncio - async def test_remove_existing_server(self): - """Test removing an existing server.""" - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref_mgr: - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - # Mock preferences - mock_prefs = MagicMock() - mock_prefs.get_runtime_server.return_value = {"command": "test"} - mock_prefs.remove_runtime_server.return_value = True - mock_pref_mgr.return_value = mock_prefs - - # Call function - await _remove_server("test-server") - - # Verify server was removed - 
mock_prefs.remove_runtime_server.assert_called_once_with("test-server") - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_remove_nonexistent_server(self): - """Test removing a non-existent server.""" - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref_mgr: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_mgr: - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - # Mock preferences - server doesn't exist - mock_prefs = MagicMock() - mock_prefs.remove_runtime_server.return_value = ( - False # Server not found - ) - mock_prefs.get_runtime_servers.return_value = {} - mock_pref_mgr.return_value = mock_prefs - - # Mock config - server doesn't exist there either - mock_config = MagicMock() - mock_config.servers = {} - mock_config_mgr.return_value.get_config.return_value = mock_config - - # Call function - await _remove_server("nonexistent") - - # Verify error message - mock_output.error.assert_called() - assert "not found" in str(mock_output.error.call_args) - - @pytest.mark.asyncio - async def test_remove_config_server(self): - """Test removing a server from project config.""" - with patch("mcp_cli.commands.actions.servers.ConfigManager") as mock_config_mgr: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref_mgr: - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - # Mock preferences - server not in runtime (returns False) - mock_prefs = MagicMock() - mock_prefs.remove_runtime_server.return_value = False - mock_pref_mgr.return_value = mock_prefs - - # Mock config - server exists in config - mock_config = MagicMock() - test_server = ServerConfig(name="config-server", command="test") - mock_config.servers = {"config-server": test_server} - mock_config_mgr.return_value.get_config.return_value = mock_config - - # Call function - await _remove_server("config-server") - - # Verify warning 
message - mock_output.warning.assert_called() - assert "project configuration" in str(mock_output.warning.call_args) - - -class TestEnableDisableServer: - """Test _enable_disable_server function.""" - - @pytest.mark.asyncio - async def test_enable_server(self): - """Test enabling a server.""" - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref_mgr: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_mgr: - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - # Mock preferences - mock_prefs = MagicMock() - mock_prefs.get_runtime_server.return_value = {"command": "test"} - mock_pref_mgr.return_value = mock_prefs - - # Mock config - mock_config = MagicMock() - mock_config.get_server.return_value = None - mock_config_mgr.return_value.get_config.return_value = mock_config - - # Call function to enable - await _enable_disable_server("test-server", enable=True) - - # Verify server was enabled - mock_prefs.enable_server.assert_called_once_with("test-server") - mock_output.success.assert_called() - - @pytest.mark.asyncio - async def test_disable_server(self): - """Test disabling a server.""" - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref_mgr: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_mgr: - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - # Mock preferences - mock_prefs = MagicMock() - mock_prefs.get_runtime_server.return_value = {"command": "test"} - mock_pref_mgr.return_value = mock_prefs - - # Mock config - mock_config = MagicMock() - mock_config.get_server.return_value = None - mock_config_mgr.return_value.get_config.return_value = mock_config - - # Call function to disable - await _enable_disable_server("test-server", enable=False) - - # Verify server was disabled - mock_prefs.disable_server.assert_called_once_with("test-server") - mock_output.success.assert_called() - - 
-class TestShowServerDetails: - """Test _show_server_details function.""" - - @pytest.mark.asyncio - async def test_show_server_details_project_server(self): - """Test showing details for a project server.""" - with patch("mcp_cli.commands.actions.servers.ConfigManager") as mock_config_mgr: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref_mgr: - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - # Mock config with server - test_server = ServerConfig( - name="test-server", - command="uvx", - args=["test-server"], - env={"DEBUG": "true"}, - disabled=False, - ) - mock_config = MagicMock() - mock_config.servers = {"test-server": test_server} - mock_config.get_server.return_value = ( - test_server # Mock get_server method - ) - mock_config_mgr.return_value.get_config.return_value = mock_config - - # Mock preferences - not found as runtime server - mock_prefs = MagicMock() - mock_prefs.get_runtime_server.return_value = None # Not in runtime - mock_pref_mgr.return_value = mock_prefs - - # Call function - await _show_server_details("test-server") - - # Verify output - mock_output.rule.assert_called() - # Details are printed with output.print - assert mock_output.print.called - - @pytest.mark.asyncio - async def test_show_server_details_runtime_server(self): - """Test showing details for a runtime server.""" - with patch("mcp_cli.commands.actions.servers.ConfigManager") as mock_config_mgr: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref_mgr: - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - # Mock config without server - mock_config = MagicMock() - mock_config.servers = {} - mock_config.get_server.return_value = None - mock_config_mgr.return_value.get_config.return_value = mock_config - - # Mock preferences with runtime server - mock_prefs = MagicMock() - mock_prefs.get_runtime_server.return_value = { - "transport": "http", - "url": 
"http://api.example.com", - "headers": {"Auth": "Bearer token"}, - } - mock_pref_mgr.return_value = mock_prefs - - # Call function - await _show_server_details("runtime-server") - - # Verify output - mock_output.rule.assert_called() - # Should print various info lines - assert mock_output.print.called - - @pytest.mark.asyncio - async def test_show_server_details_not_found(self): - """Test showing details for non-existent server.""" - with patch("mcp_cli.commands.actions.servers.ConfigManager") as mock_config_mgr: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref_mgr: - with patch( - "mcp_cli.commands.actions.servers.get_context" - ) as mock_get_context: - with patch( - "mcp_cli.commands.actions.servers.output" - ) as mock_output: - # Mock empty config - mock_config = MagicMock() - mock_config.servers = {} - mock_config.get_server.return_value = None - mock_config_mgr.return_value.get_config.return_value = ( - mock_config - ) - - # Mock empty preferences - mock_prefs = MagicMock() - mock_prefs.get_runtime_server.return_value = None - mock_pref_mgr.return_value = mock_prefs - - # Mock context with no matching server - mock_context = MagicMock() - mock_tm = AsyncMock() - mock_tm.get_server_info.return_value = [] # No servers - mock_context.tool_manager = mock_tm - mock_get_context.return_value = mock_context - - # Call function - await _show_server_details("nonexistent") - - # Verify error - mock_output.error.assert_called_with( - "Server 'nonexistent' not found" - ) - - -class TestServersActionAsync: - """Test servers_action_async function.""" - - @pytest.mark.asyncio - async def test_no_args_no_tool_manager(self): - """Test that no args with no tool manager shows error.""" - with patch("mcp_cli.commands.actions.servers.get_context") as mock_get_context: - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - # Mock context with no tool manager - mock_context = MagicMock() - mock_context.tool_manager = None - 
mock_get_context.return_value = mock_context - - result = await servers_action_async(ServerActionParams()) - - # Should show error about no tool manager - mock_output.error.assert_called_with("No tool manager available") - assert result == [] - - @pytest.mark.asyncio - async def test_no_args_with_servers(self): - """Test that no args lists connected servers.""" - with patch("mcp_cli.commands.actions.servers.get_context") as mock_get_context: - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - # Mock context with tool manager - mock_context = MagicMock() - mock_tm = AsyncMock() - - # Mock server info - mock_server = MagicMock() - mock_server.name = "test-server" - mock_server.transport = "stdio" - mock_server.tool_count = 5 - mock_server.capabilities = {"tools": True} - mock_server.display_status = "Connected" - - mock_tm.get_server_info.return_value = [mock_server] - mock_context.tool_manager = mock_tm - mock_get_context.return_value = mock_context - - result = await servers_action_async(ServerActionParams()) - - # Should display server info - assert mock_output.print_table.called - assert len(result) == 1 - assert result[0].name == "test-server" - - @pytest.mark.asyncio - async def test_list_command(self): - """Test list command.""" - with patch("mcp_cli.commands.actions.servers._list_servers") as mock_list: - mock_list.return_value = None - - result = await servers_action_async(ServerActionParams(args=["list"])) - - mock_list.assert_called_once_with(False) - assert result == [] - - @pytest.mark.asyncio - async def test_list_all_command(self): - """Test list all command.""" - with patch("mcp_cli.commands.actions.servers._list_servers") as mock_list: - mock_list.return_value = None - - await servers_action_async(ServerActionParams(args=["list", "all"])) - - mock_list.assert_called_once_with(True) - - @pytest.mark.asyncio - async def test_add_stdio_command(self): - """Test add STDIO server command.""" - with 
patch("mcp_cli.commands.actions.servers._add_server") as mock_add: - mock_add.return_value = None - - await servers_action_async( - ServerActionParams(args=["add", "test", "stdio", "command", "arg1"]) - ) - - mock_add.assert_called_once_with( - "test", "stdio", ["command", "arg1"], {}, {} - ) - - @pytest.mark.asyncio - async def test_add_http_with_options(self): - """Test add HTTP server with options.""" - with patch("mcp_cli.commands.actions.servers._add_server") as mock_add: - mock_add.return_value = None - - await servers_action_async( - ServerActionParams( - args=[ - "add", - "api", - "--transport", - "http", - "--header", - "Auth: Bearer token", - "--env", - "KEY=value", - "--", - "https://api.example.com", - ] - ) - ) - - mock_add.assert_called_once() - call_args = mock_add.call_args[0] # Use positional args - assert call_args[0] == "api" # name - assert call_args[1] == "http" # transport - assert call_args[2] == ["https://api.example.com"] # config_args - assert call_args[3] == {"KEY": "value"} # env_vars - assert call_args[4] == {"Auth": "Bearer token"} # headers - - @pytest.mark.asyncio - async def test_remove_command(self): - """Test remove server command.""" - with patch("mcp_cli.commands.actions.servers._remove_server") as mock_remove: - mock_remove.return_value = None - - await servers_action_async( - ServerActionParams(args=["remove", "test-server"]) - ) - - mock_remove.assert_called_once_with("test-server") - - @pytest.mark.asyncio - async def test_enable_command(self): - """Test enable server command.""" - with patch( - "mcp_cli.commands.actions.servers._enable_disable_server" - ) as mock_enable: - mock_enable.return_value = None - - await servers_action_async( - ServerActionParams(args=["enable", "test-server"]) - ) - - mock_enable.assert_called_once_with("test-server", True) - - @pytest.mark.asyncio - async def test_disable_command(self): - """Test disable server command.""" - with patch( - "mcp_cli.commands.actions.servers._enable_disable_server" 
- ) as mock_disable: - mock_disable.return_value = None - - await servers_action_async( - ServerActionParams(args=["disable", "test-server"]) - ) - - mock_disable.assert_called_once_with("test-server", False) - - @pytest.mark.asyncio - async def test_show_server_details(self): - """Test showing server details.""" - with patch( - "mcp_cli.commands.actions.servers._show_server_details" - ) as mock_show: - mock_show.return_value = None - - await servers_action_async(ServerActionParams(args=["test-server"])) - - mock_show.assert_called_once_with("test-server") - - -class TestServersAction: - """Test synchronous servers_action wrapper.""" - - def test_servers_action_wrapper(self): - """Test that servers_action properly wraps async function.""" - with patch("mcp_cli.commands.actions.servers.run_blocking") as mock_run: - mock_run.return_value = [{"name": "test"}] - - result = servers_action(args=["list"]) - - mock_run.assert_called_once() - assert result == [{"name": "test"}] - - -class TestListServersEdgeCases: - """Test edge cases in list_servers function.""" - - @pytest.mark.asyncio - async def test_list_servers_with_config_error(self): - """Test list_servers when config loading fails.""" - from mcp_cli.commands.actions.servers import _list_servers - - with patch("mcp_cli.commands.actions.servers.get_context") as mock_ctx: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - # Setup mocks - mock_ctx.return_value = Mock(tool_manager=None) - mock_pref.return_value = Mock( - get_runtime_servers=Mock(return_value={}), - is_server_disabled=Mock(return_value=False), - ) - - # Make get_config raise RuntimeError - mock_config = Mock() - mock_config.get_config.side_effect = RuntimeError("Config error") - mock_config.initialize.return_value = Mock(servers={}) - mock_config_cls.return_value = mock_config - - with 
patch("mcp_cli.commands.actions.servers.output"): - # Should handle error and call initialize - await _list_servers() - mock_config.initialize.assert_called_once() - - @pytest.mark.asyncio - async def test_list_servers_no_servers(self): - """Test list_servers when no servers are configured.""" - from mcp_cli.commands.actions.servers import _list_servers - - with patch("mcp_cli.commands.actions.servers.get_context") as mock_ctx: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - with patch("mcp_cli.commands.actions.servers.output") as mock_out: - # Setup empty configs - mock_ctx.return_value = Mock(tool_manager=None) - mock_pref.return_value = Mock( - get_runtime_servers=Mock(return_value={}), - is_server_disabled=Mock(return_value=False), - ) - mock_config = Mock() - mock_config.get_config.return_value = Mock(servers={}) - mock_config_cls.return_value = mock_config - - await _list_servers() - - # Should show "No servers configured" message - mock_out.info.assert_called_with("No servers configured.") - assert mock_out.tip.call_count >= 2 - - @pytest.mark.asyncio - async def test_list_servers_with_tool_manager_error(self): - """Test list_servers when tool manager fails.""" - from mcp_cli.commands.actions.servers import _list_servers - - with patch("mcp_cli.commands.actions.servers.get_context") as mock_ctx: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - # Tool manager that throws error - mock_tm = AsyncMock() - mock_tm.get_server_info.side_effect = Exception("TM error") - mock_ctx.return_value = Mock(tool_manager=mock_tm) - - mock_pref.return_value = Mock( - get_runtime_servers=Mock(return_value={}), - is_server_disabled=Mock(return_value=False), - ) - - mock_config = Mock() - 
mock_config.get_config.return_value = Mock(servers={}) - mock_config_cls.return_value = mock_config - - with patch("mcp_cli.commands.actions.servers.output"): - # Should handle error gracefully - await _list_servers() - # Function should complete without raising - - @pytest.mark.asyncio - async def test_list_servers_with_disabled_servers(self): - """Test list_servers filtering disabled servers.""" - from mcp_cli.commands.actions.servers import _list_servers - - with patch("mcp_cli.commands.actions.servers.get_context") as mock_ctx: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - with patch("mcp_cli.commands.actions.servers.output"): - mock_ctx.return_value = Mock(tool_manager=None) - - # Mock a runtime server that's disabled - runtime_servers = {"disabled_server": {"command": "test"}} - mock_pref.return_value = Mock( - get_runtime_servers=Mock(return_value=runtime_servers), - is_server_disabled=Mock(return_value=True), - ) - - mock_config = Mock() - mock_config.get_config.return_value = Mock(servers={}) - mock_config_cls.return_value = mock_config - - # Should skip disabled server when show_all=False - await _list_servers(show_all=False) - # Server should be filtered out - - @pytest.mark.asyncio - async def test_list_servers_with_disabled_config_servers(self): - """Test list_servers filtering disabled config servers.""" - from mcp_cli.commands.actions.servers import _list_servers - - with patch("mcp_cli.commands.actions.servers.get_context") as mock_ctx: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - with patch("mcp_cli.commands.actions.servers.output"): - mock_ctx.return_value = Mock(tool_manager=None) - mock_pref.return_value = Mock( - get_runtime_servers=Mock(return_value={}), - 
is_server_disabled=Mock(return_value=False), - ) - - # Mock a config server that's disabled - disabled_server = ServerConfig( - name="disabled", command="test", disabled=True - ) - mock_config = Mock() - mock_config.get_config.return_value = Mock( - servers={"disabled": disabled_server} - ) - mock_config_cls.return_value = mock_config - - # Should skip disabled server when show_all=False - await _list_servers(show_all=False) - # Server should be filtered out - - @pytest.mark.asyncio - async def test_list_servers_with_connected_tool_count(self): - """Test list_servers matching connected server tool counts.""" - from mcp_cli.commands.actions.servers import _list_servers - from mcp_cli.commands.models.responses import ServerInfoResponse - - with patch("mcp_cli.commands.actions.servers.get_context") as mock_ctx: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - with patch("mcp_cli.commands.actions.servers.output"): - # Mock a connected server - connected = ServerInfoResponse( - name="test_server", - transport="stdio", - tool_count=5, - capabilities={}, - status="connected", - ping_ms=25.0, - ) - mock_tm = AsyncMock() - mock_tm.get_server_info = AsyncMock(return_value=[connected]) - mock_ctx.return_value = Mock(tool_manager=mock_tm) - - # Mock config with matching server - config_server = ServerConfig( - name="test_server", command="test", disabled=False - ) - mock_config = Mock() - mock_config.get_config.return_value = Mock( - servers={"test_server": config_server} - ) - mock_config_cls.return_value = mock_config - - mock_pref.return_value = Mock( - get_runtime_servers=Mock(return_value={}), - is_server_disabled=Mock(return_value=False), - ) - - # Should match tool count from connected server - await _list_servers() - # Lines 168-171 should be covered - - @pytest.mark.asyncio - async def 
test_list_servers_runtime_connected_tool_count(self): - """Test list_servers matching runtime server tool counts.""" - from mcp_cli.commands.actions.servers import _list_servers - from mcp_cli.commands.models.responses import ServerInfoResponse - - with patch("mcp_cli.commands.actions.servers.get_context") as mock_ctx: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - with patch("mcp_cli.commands.actions.servers.output"): - # Mock a connected server - connected = ServerInfoResponse( - name="runtime_server", - transport="stdio", - tool_count=10, - capabilities={}, - status="connected", - ping_ms=30.0, - ) - mock_tm = AsyncMock() - mock_tm.get_server_info = AsyncMock(return_value=[connected]) - mock_ctx.return_value = Mock(tool_manager=mock_tm) - - # Mock runtime server - mock_pref.return_value = Mock( - get_runtime_servers=Mock( - return_value={"runtime_server": {"command": "test"}} - ), - is_server_disabled=Mock(return_value=False), - ) - - mock_config = Mock() - mock_config.get_config.return_value = Mock(servers={}) - mock_config_cls.return_value = mock_config - - # Should match tool count from connected server - await _list_servers() - # Lines 205-208 should be covered - - @pytest.mark.asyncio - async def test_list_servers_runtime_url_transport(self): - """Test list_servers with runtime server having URL.""" - from mcp_cli.commands.actions.servers import _list_servers - - with patch("mcp_cli.commands.actions.servers.get_context") as mock_ctx: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - with patch("mcp_cli.commands.actions.servers.output"): - mock_ctx.return_value = Mock(tool_manager=None) - - # Mock runtime server with URL and transport - mock_pref.return_value = Mock( - get_runtime_servers=Mock( 
- return_value={ - "http_server": { - "url": "http://localhost:8080", - "transport": "http", - } - } - ), - is_server_disabled=Mock(return_value=False), - ) - - mock_config = Mock() - mock_config.get_config.return_value = Mock(servers={}) - mock_config_cls.return_value = mock_config - - # Should use transport from config - await _list_servers() - # Line 218 should be covered - - @pytest.mark.asyncio - async def test_list_servers_connected_not_in_config(self): - """Test list_servers with connected server not in config or runtime.""" - from mcp_cli.commands.actions.servers import _list_servers - from mcp_cli.commands.models.responses import ServerInfoResponse - - with patch("mcp_cli.commands.actions.servers.get_context") as mock_ctx: - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - with patch("mcp_cli.commands.actions.servers.output"): - # Mock a connected server not in config or runtime - connected = ServerInfoResponse( - name="external_server", - transport="http", - tool_count=3, - capabilities={}, - status="connected", - ping_ms=15.0, - ) - mock_tm = AsyncMock() - mock_tm.get_server_info = AsyncMock(return_value=[connected]) - mock_ctx.return_value = Mock(tool_manager=mock_tm) - - mock_pref.return_value = Mock( - get_runtime_servers=Mock(return_value={}), - is_server_disabled=Mock(return_value=False), - ) - - mock_config = Mock() - mock_config.get_config.return_value = Mock(servers={}) - mock_config_cls.return_value = mock_config - - # Should add server as "Active" source - await _list_servers() - # Lines 234-246 should be covered - - -class TestAddServerEdgeCases: - """Test edge cases for adding servers.""" - - @pytest.mark.asyncio - async def test_add_server_exists_in_config(self): - """Test adding server that exists in project config.""" - from mcp_cli.commands.actions.servers import _add_server - - with patch( - 
"mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - # Mock runtime doesn't have it - mock_pref.return_value = Mock( - get_runtime_server=Mock(return_value=None) - ) - - # But config does have it - mock_config = Mock() - mock_config.get_config.return_value = Mock( - servers={ - "existing": ServerConfig(name="existing", command="test") - } - ) - mock_config_cls.return_value = mock_config - - # Try to add it - await _add_server("existing", "stdio", ["test"], None, None) - - # Should error - mock_output.error.assert_called() - # Lines 312-315 should be covered - - @pytest.mark.asyncio - async def test_add_server_with_env_vars(self): - """Test adding server with environment variables.""" - from mcp_cli.commands.actions.servers import _add_server - - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - with patch("mcp_cli.commands.actions.servers.output"): - mock_pref_instance = Mock( - get_runtime_server=Mock(return_value=None), - add_runtime_server=Mock(), - ) - mock_pref.return_value = mock_pref_instance - - # Config doesn't have it - mock_config = Mock() - mock_config.get_config.side_effect = RuntimeError("No config") - mock_config_cls.return_value = mock_config - - # Add with env vars - env_vars = {"API_KEY": "secret"} - await _add_server("new_server", "stdio", ["test"], env_vars, None) - - # Should save with env vars - call_args = mock_pref_instance.add_runtime_server.call_args - assert call_args[0][1]["env"] == env_vars - # Line 331 should be covered - - @pytest.mark.asyncio - async def test_add_server_http_with_headers(self): - """Test adding HTTP server with headers.""" - from mcp_cli.commands.actions.servers import _add_server - - with patch( - 
"mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - with patch("mcp_cli.commands.actions.servers.output"): - mock_pref_instance = Mock( - get_runtime_server=Mock(return_value=None), - add_runtime_server=Mock(), - ) - mock_pref.return_value = mock_pref_instance - - mock_config = Mock() - mock_config.get_config.side_effect = RuntimeError("No config") - mock_config_cls.return_value = mock_config - - # Add HTTP server with headers and env - headers = {"Authorization": "Bearer token"} - env_vars = {"API_KEY": "secret"} - await _add_server( - "http_server", - "http", - ["http://localhost:8080"], - env_vars, - headers, - ) - - # Should save with headers and env - call_args = mock_pref_instance.add_runtime_server.call_args - assert call_args[0][1]["headers"] == headers - assert call_args[0][1]["env"] == env_vars - # Lines 342-345 should be covered - - @pytest.mark.asyncio - async def test_add_server_unknown_transport(self): - """Test adding server with unknown transport type.""" - from mcp_cli.commands.actions.servers import _add_server - - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - mock_pref.return_value = Mock( - get_runtime_server=Mock(return_value=None) - ) - - mock_config = Mock() - mock_config.get_config.side_effect = RuntimeError("No config") - mock_config_cls.return_value = mock_config - - # Try to add with unknown transport - await _add_server("test", "unknown", ["config"], None, None) - - # Should error with transport message - error_calls = [ - str(call) for call in mock_output.error.call_args_list - ] - assert any("Unknown transport" in str(call) for call in error_calls) - mock_output.info.assert_called() - # Lines 347-349 should be covered - 
- -class TestEnableDisableServerEdgeCases: - """Test edge cases for enabling/disabling servers.""" - - @pytest.mark.asyncio - async def test_enable_server_config_error(self): - """Test enabling server when config has error.""" - from mcp_cli.commands.actions.servers import _enable_disable_server - - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - with patch("mcp_cli.commands.actions.servers.output"): - # Runtime doesn't have it - mock_pref_instance = Mock( - get_runtime_server=Mock(return_value=None), enable_server=Mock() - ) - mock_pref.return_value = mock_pref_instance - - # Config has error, then initialize returns empty - mock_config = Mock() - mock_config.get_config.side_effect = RuntimeError("Config error") - mock_config.initialize.return_value = Mock(servers={}) - mock_config.get_server.return_value = None - mock_config_cls.return_value = mock_config - - # Try to enable non-existent server - await _enable_disable_server("nonexistent", True) - - # Should initialize config and check - mock_config.initialize.assert_called_once() - # Lines 418-420 should be covered - - @pytest.mark.asyncio - async def test_disable_server_not_found(self): - """Test disabling server that doesn't exist in both runtime and config.""" - from mcp_cli.commands.actions.servers import _enable_disable_server - - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - # Not in runtime - mock_pref.return_value = Mock( - get_runtime_server=Mock(return_value=None) - ) - - # Not in config either - mock_config = Mock() - config_obj = Mock(servers={}) - config_obj.get_server.return_value = None - mock_config.get_config.return_value = config_obj - 
mock_config_cls.return_value = mock_config - - # Try to disable - await _enable_disable_server("nonexistent", False) - - # Should error - server not found - assert mock_output.error.called - # Lines 422-424 should be covered - - -class TestShowServerDetailsEdgeCases: - """Test edge cases for showing server details.""" - - @pytest.mark.asyncio - async def test_show_details_config_error_fallback(self): - """Test show details when config has error but server is connected.""" - from mcp_cli.commands.actions.servers import _show_server_details - from mcp_cli.commands.models.responses import ServerInfoResponse - - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - with patch("mcp_cli.commands.actions.servers.get_context") as mock_ctx: - with patch("mcp_cli.commands.actions.servers.output"): - # Not in runtime - mock_pref.return_value = Mock( - get_runtime_server=Mock(return_value=None) - ) - - # Config has error - mock_config = Mock() - mock_config.get_config.side_effect = RuntimeError( - "Config error" - ) - mock_config_cls.return_value = mock_config - - # But server is connected - connected = ServerInfoResponse( - name="connected_server", - transport="stdio", - tool_count=5, - capabilities={}, - status="connected", - ping_ms=20.0, - ) - mock_tm = AsyncMock() - mock_tm.get_server_info = AsyncMock(return_value=[connected]) - mock_ctx.return_value = Mock(tool_manager=mock_tm) - - # Show details - await _show_server_details("connected_server") - - # Should find connected server - mock_tm.get_server_info.assert_called_once() - # Lines 461-462, 474-476 should be covered - - @pytest.mark.asyncio - async def test_show_details_tool_manager_error(self): - """Test show details when tool manager errors.""" - from mcp_cli.commands.actions.servers import _show_server_details - - with patch( - "mcp_cli.commands.actions.servers.get_preference_manager" - ) 
as mock_pref: - with patch( - "mcp_cli.commands.actions.servers.ConfigManager" - ) as mock_config_cls: - with patch("mcp_cli.commands.actions.servers.get_context") as mock_ctx: - with patch( - "mcp_cli.commands.actions.servers.output" - ) as mock_output: - # Not in runtime or config - mock_pref.return_value = Mock( - get_runtime_server=Mock(return_value=None) - ) - - mock_config = Mock() - mock_config.get_config.side_effect = RuntimeError( - "Config error" - ) - mock_config_cls.return_value = mock_config - - # Tool manager errors - mock_tm = AsyncMock() - mock_tm.get_server_info = AsyncMock( - side_effect=Exception("TM error") - ) - mock_ctx.return_value = Mock(tool_manager=mock_tm) - - # Show details - await _show_server_details("nonexistent") - - # Should error gracefully - mock_output.error.assert_called() - # Lines 477-478 should be covered - - -class TestShowConnectedServerDetails: - """Test _show_connected_server_details function.""" - - @pytest.mark.asyncio - async def test_show_connected_server_details(self): - """Test showing connected server details.""" - from mcp_cli.commands.actions.servers import _show_connected_server_details - from mcp_cli.commands.models.responses import ServerInfoResponse - - server = ServerInfoResponse( - name="test_server", - transport="stdio", - tool_count=10, - capabilities={"tools": True, "resources": True}, - status="connected", - ping_ms=25.0, - ) - - with patch("mcp_cli.commands.actions.servers.output") as mock_output: - await _show_connected_server_details(server) - - # Should display server info - mock_output.rule.assert_called_once() - assert mock_output.print.call_count >= 4 - mock_output.tip.assert_called_once() - # Lines 544-557 should be covered - - -class TestGetServerStatusUnknownTransport: - """Test _get_server_status with unknown transport.""" - - def test_get_server_status_unknown_transport(self): - """Test _get_server_status with server having command but not matching stdio/http.""" - from 
mcp_cli.commands.actions.servers import _get_server_status - - # This actually tests line 107-108 (no command or URL) - # Line 117 is unreachable in current code logic - server = {"transport": "unknown"} - - icon, status, detail = _get_server_status(server, False) - - # Should return not configured (no command or URL) - assert icon == "❌" - assert status == "Not Configured" - # Lines 107-108 should be covered diff --git a/tests/commands/actions/test_theme_action.py b/tests/commands/actions/test_theme_action.py deleted file mode 100644 index b5bceb8e..00000000 --- a/tests/commands/actions/test_theme_action.py +++ /dev/null @@ -1,278 +0,0 @@ -"""Tests for the theme action command.""" - -import pytest -import asyncio -from unittest.mock import MagicMock, patch - -from mcp_cli.commands.actions.theme import ( - theme_command, - _interactive_theme_selection, - _show_theme_preview, - theme_action_async, -) -from mcp_cli.utils.preferences import Theme -from mcp_cli.commands.models import ThemeActionParams - - -@pytest.fixture -def mock_pref_manager(): - """Create a mock preference manager.""" - manager = MagicMock() - manager.get_theme.return_value = "default" - manager.set_theme.return_value = None - return manager - - -def test_theme_command_list_themes(mock_pref_manager): - """Test listing all available themes.""" - with patch( - "mcp_cli.commands.actions.theme.get_preference_manager", - return_value=mock_pref_manager, - ): - with patch("mcp_cli.commands.actions.theme.output") as mock_output: - theme_command(list_themes=True) - - mock_output.rule.assert_called_with("Available Themes") - # Should print all themes - assert mock_output.print.call_count >= len(Theme) - mock_output.hint.assert_called_with("Use '/theme ' to switch themes") - - -def test_theme_command_no_args_lists_themes(mock_pref_manager): - """Test that no arguments also lists themes.""" - with patch( - "mcp_cli.commands.actions.theme.get_preference_manager", - return_value=mock_pref_manager, - ): - with 
patch("mcp_cli.commands.actions.theme.output") as mock_output: - theme_command() - - mock_output.rule.assert_called_with("Available Themes") - mock_output.hint.assert_called() - - -def test_theme_command_list_themes_shows_current(mock_pref_manager): - """Test that listing themes highlights the current theme.""" - mock_pref_manager.get_theme.return_value = "dark" - - with patch( - "mcp_cli.commands.actions.theme.get_preference_manager", - return_value=mock_pref_manager, - ): - with patch("mcp_cli.commands.actions.theme.output") as mock_output: - theme_command(list_themes=True) - - # Check that current theme is marked - info_calls = [str(call) for call in mock_output.info.call_args_list] - assert any("dark (current)" in str(call) for call in info_calls) - - -def test_theme_command_switch_valid_theme(mock_pref_manager): - """Test switching to a valid theme.""" - with patch( - "mcp_cli.commands.actions.theme.get_preference_manager", - return_value=mock_pref_manager, - ): - with patch("mcp_cli.commands.actions.theme.set_theme") as mock_set_theme: - with patch("mcp_cli.commands.actions.theme.output") as mock_output: - theme_command(theme_name="dark") - - mock_set_theme.assert_called_with("dark") - mock_pref_manager.set_theme.assert_called_with("dark") - mock_output.success.assert_called_with("Theme switched to: dark") - - -def test_theme_command_switch_invalid_theme(mock_pref_manager): - """Test switching to an invalid theme.""" - with patch( - "mcp_cli.commands.actions.theme.get_preference_manager", - return_value=mock_pref_manager, - ): - with patch("mcp_cli.commands.actions.theme.output") as mock_output: - theme_command(theme_name="invalid-theme") - - mock_output.error.assert_called_with("Invalid theme: invalid-theme") - mock_output.hint.assert_called() - mock_pref_manager.set_theme.assert_not_called() - - -def test_theme_command_switch_case_insensitive(mock_pref_manager): - """Test that theme switching is case insensitive.""" - with patch( - 
"mcp_cli.commands.actions.theme.get_preference_manager", - return_value=mock_pref_manager, - ): - with patch("mcp_cli.commands.actions.theme.set_theme") as mock_set_theme: - with patch("mcp_cli.commands.actions.theme.output") as mock_output: - theme_command(theme_name="DARK") - - mock_set_theme.assert_called_with("dark") - mock_pref_manager.set_theme.assert_called_with("dark") - mock_output.success.assert_called() - - -def test_theme_command_switch_error_handling(mock_pref_manager): - """Test error handling when theme switch fails.""" - with patch( - "mcp_cli.commands.actions.theme.get_preference_manager", - return_value=mock_pref_manager, - ): - with patch( - "mcp_cli.commands.actions.theme.set_theme", - side_effect=Exception("Test error"), - ): - with patch("mcp_cli.commands.actions.theme.output") as mock_output: - theme_command(theme_name="dark") - - mock_output.error.assert_called_with( - "Failed to switch theme: Test error" - ) - - -def test_theme_command_select_interactive(mock_pref_manager): - """Test interactive theme selection.""" - with patch( - "mcp_cli.commands.actions.theme.get_preference_manager", - return_value=mock_pref_manager, - ): - with patch("asyncio.run") as mock_run: - theme_command(select=True) - - mock_run.assert_called_once() - # Check that _interactive_theme_selection was passed - call_args = mock_run.call_args[0][0] - assert hasattr(call_args, "__name__") or asyncio.iscoroutine(call_args) - - -@pytest.mark.asyncio -async def test_interactive_theme_selection_numeric_input(mock_pref_manager): - """Test interactive selection with numeric input.""" - with patch("mcp_cli.commands.actions.theme.output") as mock_output: - with patch("mcp_cli.commands.actions.theme.format_table") as mock_format_table: - mock_format_table.return_value = "formatted_table" - with patch("mcp_cli.commands.actions.theme.ask", return_value="2"): - with patch( - "mcp_cli.commands.actions.theme.set_theme" - ) as mock_set_theme: - await 
_interactive_theme_selection(mock_pref_manager) - - # Should select the second theme (dark) - mock_set_theme.assert_called_with("dark") - mock_pref_manager.set_theme.assert_called_with("dark") - mock_output.success.assert_called() - - -@pytest.mark.asyncio -async def test_interactive_theme_selection_name_input(mock_pref_manager): - """Test interactive selection with theme name input.""" - with patch("mcp_cli.commands.actions.theme.output"): - with patch("mcp_cli.commands.actions.theme.format_table") as mock_format_table: - mock_format_table.return_value = "formatted_table" - with patch("mcp_cli.commands.actions.theme.ask", return_value="monokai"): - with patch( - "mcp_cli.commands.actions.theme.set_theme" - ) as mock_set_theme: - await _interactive_theme_selection(mock_pref_manager) - - mock_set_theme.assert_called_with("monokai") - mock_pref_manager.set_theme.assert_called_with("monokai") - - -@pytest.mark.asyncio -async def test_interactive_theme_selection_invalid_numeric(mock_pref_manager): - """Test interactive selection with invalid numeric input.""" - with patch("mcp_cli.commands.actions.theme.output") as mock_output: - with patch("mcp_cli.commands.actions.theme.format_table") as mock_format_table: - mock_format_table.return_value = "formatted_table" - with patch("mcp_cli.commands.actions.theme.ask", return_value="99"): - await _interactive_theme_selection(mock_pref_manager) - - mock_output.error.assert_called() - mock_pref_manager.set_theme.assert_not_called() - - -@pytest.mark.asyncio -async def test_interactive_theme_selection_invalid_name(mock_pref_manager): - """Test interactive selection with invalid theme name.""" - with patch("mcp_cli.commands.actions.theme.output") as mock_output: - with patch("mcp_cli.commands.actions.theme.format_table") as mock_format_table: - mock_format_table.return_value = "formatted_table" - with patch( - "mcp_cli.commands.actions.theme.ask", return_value="nonexistent" - ): - await 
_interactive_theme_selection(mock_pref_manager) - - mock_output.error.assert_called_with("Unknown theme: nonexistent") - mock_output.hint.assert_called() - mock_pref_manager.set_theme.assert_not_called() - - -@pytest.mark.asyncio -async def test_interactive_theme_selection_same_theme(mock_pref_manager): - """Test selecting the same theme that's already active.""" - mock_pref_manager.get_theme.return_value = "default" - - with patch("mcp_cli.commands.actions.theme.output") as mock_output: - with patch("mcp_cli.commands.actions.theme.format_table") as mock_format_table: - mock_format_table.return_value = "formatted_table" - with patch("mcp_cli.commands.actions.theme.ask", return_value="default"): - await _interactive_theme_selection(mock_pref_manager) - - mock_output.info.assert_called_with("Already using theme: default") - mock_pref_manager.set_theme.assert_not_called() - - -@pytest.mark.asyncio -async def test_interactive_theme_selection_shows_preview(mock_pref_manager): - """Test that theme preview is shown after selection.""" - with patch("mcp_cli.commands.actions.theme.output"): - with patch("mcp_cli.commands.actions.theme.format_table") as mock_format_table: - mock_format_table.return_value = "formatted_table" - with patch("mcp_cli.commands.actions.theme.ask", return_value="dark"): - with patch("mcp_cli.commands.actions.theme.set_theme"): - with patch( - "mcp_cli.commands.actions.theme._show_theme_preview" - ) as mock_preview: - await _interactive_theme_selection(mock_pref_manager) - - mock_preview.assert_called_once() - - -def test_show_theme_preview(): - """Test theme preview display.""" - with patch("mcp_cli.commands.actions.theme.output") as mock_output: - _show_theme_preview() - - mock_output.print.assert_called_with("Theme Preview:") - mock_output.info.assert_called_with("Information message") - mock_output.success.assert_called_with("Success message") - mock_output.warning.assert_called_with("Warning message") - 
mock_output.error.assert_called_with("Error message") - mock_output.hint.assert_called_with("Hint message") - - -@pytest.mark.asyncio -async def test_theme_action_async_no_args(): - """Test async theme action with no arguments.""" - with patch("mcp_cli.commands.actions.theme.theme_command") as mock_command: - await theme_action_async(ThemeActionParams()) - - mock_command.assert_called_with(list_themes=True) - - -@pytest.mark.asyncio -async def test_theme_action_async_with_theme_name(): - """Test async theme action with theme name.""" - with patch("mcp_cli.commands.actions.theme.theme_command") as mock_command: - await theme_action_async(ThemeActionParams(theme_name="dark")) - - mock_command.assert_called_with(theme_name="dark") - - -@pytest.mark.asyncio -async def test_theme_action_async_multiple_args(): - """Test async theme action with multiple arguments (uses first one).""" - with patch("mcp_cli.commands.actions.theme.theme_command") as mock_command: - await theme_action_async(ThemeActionParams(theme_name="dark")) - - mock_command.assert_called_with(theme_name="dark") diff --git a/tests/commands/actions/test_token_action.py b/tests/commands/actions/test_token_action.py deleted file mode 100644 index 59b38d10..00000000 --- a/tests/commands/actions/test_token_action.py +++ /dev/null @@ -1,194 +0,0 @@ -"""Tests for token action with new server_names parameter.""" - -import pytest -from unittest.mock import Mock, patch -import time - -from mcp_cli.commands.models import TokenListParams -from mcp_cli.commands.actions.token import token_list_action_async -from mcp_cli.auth import OAuthTokens - - -def create_mock_manager(load_tokens_return=None): - """Helper to create a properly mocked token manager.""" - mock_manager = Mock() - mock_manager.load_tokens = Mock(return_value=load_tokens_return) - mock_registry = Mock() - mock_registry.list_tokens = Mock(return_value=[]) # No other tokens by default - mock_manager.registry = mock_registry - return mock_manager - - -class 
TestTokenListOAuthWithServers: - """Test OAuth token listing with server_names.""" - - @pytest.mark.asyncio - async def test_list_oauth_tokens_with_servers(self): - """Test listing OAuth tokens when servers provided.""" - mock_tokens = OAuthTokens( - access_token="test-token", - token_type="bearer", - expires_in=3600, - issued_at=time.time(), - ) - - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = create_mock_manager(mock_tokens) - mock_get_tm.return_value = mock_manager - - params = TokenListParams( - server_names=["notion", "github"], - show_oauth=True, - show_providers=False, - ) - - await token_list_action_async(params) - - # Verify load_tokens was called for each server - assert mock_manager.load_tokens.call_count == 2 - - @pytest.mark.asyncio - async def test_list_oauth_tokens_with_expiration(self): - """Test OAuth token listing shows expiration correctly.""" - issued_at = time.time() - 1800 # 30 minutes ago - mock_tokens = OAuthTokens( - access_token="test-token", - token_type="bearer", - expires_in=3600, - issued_at=issued_at, - ) - - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = create_mock_manager(mock_tokens) - mock_get_tm.return_value = mock_manager - - params = TokenListParams( - server_names=["notion"], - show_oauth=True, - show_providers=False, - ) - - await token_list_action_async(params) - - @pytest.mark.asyncio - async def test_list_oauth_tokens_expired(self): - """Test OAuth token listing shows expired tokens.""" - issued_at = time.time() - 7200 # 2 hours ago - mock_tokens = OAuthTokens( - access_token="test-token", - token_type="bearer", - expires_in=3600, # 1 hour expiry - issued_at=issued_at, - ) - - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = create_mock_manager(mock_tokens) - mock_get_tm.return_value = mock_manager - - params = TokenListParams( - server_names=["notion"], - show_oauth=True, - 
show_providers=False, - ) - - await token_list_action_async(params) - - @pytest.mark.asyncio - async def test_list_oauth_tokens_without_issued_at(self): - """Test OAuth token listing when issued_at is None.""" - mock_tokens = OAuthTokens( - access_token="test-token", - token_type="bearer", - expires_in=3600, - issued_at=None, - ) - - with ( - patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm, - patch("time.time", return_value=1234567890.0), - ): - mock_manager = create_mock_manager(mock_tokens) - mock_get_tm.return_value = mock_manager - - params = TokenListParams( - server_names=["notion"], - show_oauth=True, - show_providers=False, - ) - - await token_list_action_async(params) - - @pytest.mark.asyncio - async def test_list_oauth_tokens_no_expiration(self): - """Test OAuth token listing when token has no expiration.""" - mock_tokens = OAuthTokens( - access_token="test-token", - token_type="bearer", - expires_in=None, - issued_at=time.time(), - ) - - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = create_mock_manager(mock_tokens) - mock_get_tm.return_value = mock_manager - - params = TokenListParams( - server_names=["notion"], - show_oauth=True, - show_providers=False, - ) - - await token_list_action_async(params) - - @pytest.mark.asyncio - async def test_list_oauth_tokens_no_servers(self): - """Test OAuth token listing when no servers configured.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = create_mock_manager() - mock_get_tm.return_value = mock_manager - - params = TokenListParams( - server_names=[], # No servers - show_oauth=True, - show_providers=False, - ) - - await token_list_action_async(params) - - @pytest.mark.asyncio - async def test_list_oauth_tokens_server_no_tokens(self): - """Test OAuth token listing when server has no tokens.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - 
mock_manager = create_mock_manager(None) # No tokens - mock_get_tm.return_value = mock_manager - - params = TokenListParams( - server_names=["notion"], - show_oauth=True, - show_providers=False, - ) - - await token_list_action_async(params) - - @pytest.mark.asyncio - async def test_list_oauth_tokens_with_registered_at(self): - """Test OAuth token listing shows created date.""" - issued_at = time.time() - mock_tokens = OAuthTokens( - access_token="test-token", - token_type="bearer", - expires_in=3600, - issued_at=issued_at, - ) - - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = create_mock_manager(mock_tokens) - mock_get_tm.return_value = mock_manager - - params = TokenListParams( - server_names=["notion"], - show_oauth=True, - show_providers=False, - ) - - await token_list_action_async(params) diff --git a/tests/commands/actions/test_token_extended.py b/tests/commands/actions/test_token_extended.py deleted file mode 100644 index 4b52a101..00000000 --- a/tests/commands/actions/test_token_extended.py +++ /dev/null @@ -1,925 +0,0 @@ -"""Extended tests for token action to reach 90%+ coverage.""" - -import pytest -import time -from unittest.mock import Mock, patch - -from mcp_cli.commands.actions.token import ( - _get_token_manager, - token_list_action_async, - token_set_action_async, - token_get_action_async, - token_delete_action_async, - token_set_provider_action_async, - token_get_provider_action_async, - token_delete_provider_action_async, - token_clear_action_async, - token_backends_action_async, -) -from mcp_cli.commands.models import ( - TokenListParams, - TokenSetParams, - TokenDeleteParams, - TokenClearParams, - TokenProviderParams, -) -from mcp_cli.auth import TokenType, TokenStoreBackend - - -class TestGetTokenManager: - """Tests for _get_token_manager function.""" - - def test_get_token_manager_with_env_override(self): - """Test getting token manager with environment variable override.""" - with 
patch.dict("os.environ", {"MCP_CLI_TOKEN_BACKEND": "keychain"}): - with patch("mcp_cli.commands.actions.token.TokenManager") as mock_tm: - _get_token_manager() - - # Verify TokenManager was called with keychain backend - call_args = mock_tm.call_args - assert call_args[1]["backend"] == TokenStoreBackend.KEYCHAIN - - def test_get_token_manager_with_invalid_env_override(self): - """Test invalid environment variable falls back to config.""" - with patch.dict("os.environ", {"MCP_CLI_TOKEN_BACKEND": "invalid-backend"}): - with patch("mcp_cli.commands.actions.token.get_config") as mock_config: - mock_config.return_value.token_store_backend = "encrypted" - with patch("mcp_cli.commands.actions.token.TokenManager") as mock_tm: - _get_token_manager() - - # Should fall back to config - call_args = mock_tm.call_args - assert call_args[1]["backend"] == TokenStoreBackend.ENCRYPTED_FILE - - def test_get_token_manager_from_config(self): - """Test getting token manager from config when no env override.""" - with patch.dict("os.environ", {}, clear=True): - with patch("mcp_cli.commands.actions.token.get_config") as mock_config: - mock_config.return_value.token_store_backend = "encrypted" - with patch("mcp_cli.commands.actions.token.TokenManager") as mock_tm: - _get_token_manager() - - call_args = mock_tm.call_args - assert call_args[1]["backend"] == TokenStoreBackend.ENCRYPTED_FILE - - def test_get_token_manager_config_exception_fallback(self): - """Test config exception falls back to AUTO.""" - with patch.dict("os.environ", {}, clear=True): - with patch( - "mcp_cli.commands.actions.token.get_config", - side_effect=Exception("Config error"), - ): - with patch("mcp_cli.commands.actions.token.TokenManager") as mock_tm: - _get_token_manager() - - call_args = mock_tm.call_args - assert call_args[1]["backend"] == TokenStoreBackend.AUTO - - -class TestTokenListExtended: - """Extended tests for token_list_action_async.""" - - @pytest.mark.asyncio - async def test_list_provider_tokens(self): 
- """Test listing provider tokens.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - with patch( - "mcp_cli.auth.provider_tokens.list_all_provider_tokens" - ) as mock_list_prov: - mock_manager = Mock() - mock_registry = Mock() - mock_registry.list_tokens = Mock(return_value=[]) - mock_manager.registry = mock_registry - mock_get_tm.return_value = mock_manager - - # Return provider tokens with env override - mock_list_prov.return_value = { - "openai": { - "env_var": "OPENAI_API_KEY", - "in_env": True, - }, - "anthropic": { - "env_var": "ANTHROPIC_API_KEY", - "in_env": False, - }, - } - - params = TokenListParams(show_providers=True, show_oauth=False) - await token_list_action_async(params) - - mock_list_prov.assert_called_once() - - @pytest.mark.asyncio - async def test_list_registry_tokens_bearer(self): - """Test listing bearer tokens from registry.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_registry = Mock() - - # Return bearer tokens - mock_registry.list_tokens.return_value = [ - { - "type": "bearer", - "name": "test-token", - "namespace": "generic", - "registered_at": time.time(), - "metadata": {"expires_at": time.time() + 3600}, - } - ] - mock_manager.registry = mock_registry - mock_get_tm.return_value = mock_manager - - params = TokenListParams( - show_bearer=True, show_api_keys=False, show_providers=False - ) - await token_list_action_async(params) - - @pytest.mark.asyncio - async def test_list_registry_tokens_api_key(self): - """Test listing API key tokens from registry.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_registry = Mock() - - # Return API key tokens - mock_registry.list_tokens.return_value = [ - { - "type": "api-key", - "name": "test-api-key", - "namespace": "generic", - "registered_at": time.time(), - "metadata": {"provider": "custom-provider"}, - } - ] - 
mock_manager.registry = mock_registry - mock_get_tm.return_value = mock_manager - - params = TokenListParams( - show_api_keys=True, show_bearer=False, show_providers=False - ) - await token_list_action_async(params) - - @pytest.mark.asyncio - async def test_list_registry_tokens_expired(self): - """Test listing expired tokens from registry.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_registry = Mock() - - # Return expired token - mock_registry.list_tokens.return_value = [ - { - "type": "bearer", - "name": "expired-token", - "namespace": "generic", - "registered_at": time.time() - 7200, - "metadata": { - "expires_at": time.time() - 3600 - }, # Expired 1 hour ago - } - ] - mock_manager.registry = mock_registry - mock_get_tm.return_value = mock_manager - - params = TokenListParams(show_bearer=True, show_providers=False) - await token_list_action_async(params) - - @pytest.mark.asyncio - async def test_list_no_tokens(self): - """Test listing when no tokens found.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - with patch( - "mcp_cli.auth.provider_tokens.list_all_provider_tokens", return_value={} - ): - mock_manager = Mock() - mock_registry = Mock() - mock_registry.list_tokens.return_value = [] - mock_manager.registry = mock_registry - mock_get_tm.return_value = mock_manager - - params = TokenListParams(show_providers=True) - await token_list_action_async(params) - - @pytest.mark.asyncio - async def test_list_error_handling(self): - """Test error handling in token list.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - side_effect=Exception("Test error"), - ): - params = TokenListParams() - - with pytest.raises(Exception, match="Test error"): - await token_list_action_async(params) - - -class TestTokenSet: - """Tests for token_set_action_async.""" - - @pytest.mark.asyncio - async def test_set_bearer_token_with_value(self): - """Test 
setting bearer token with provided value.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_store = Mock() - mock_registry = Mock() - mock_manager.token_store = mock_store - mock_manager.registry = mock_registry - mock_get_tm.return_value = mock_manager - - params = TokenSetParams( - name="test-bearer", - token_type="bearer", - value="bearer-token-123", - namespace="generic", - ) - - await token_set_action_async(params) - - # Verify store and registry were called - mock_store._store_raw.assert_called_once() - mock_registry.register.assert_called_once() - - @pytest.mark.asyncio - async def test_set_bearer_token_prompt_for_value(self): - """Test setting bearer token with prompted value.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - with patch("getpass.getpass", return_value="prompted-token"): - mock_manager = Mock() - mock_store = Mock() - mock_registry = Mock() - mock_manager.token_store = mock_store - mock_manager.registry = mock_registry - mock_get_tm.return_value = mock_manager - - params = TokenSetParams( - name="test-bearer", - token_type="bearer", - value=None, # Will be prompted - namespace="generic", - ) - - await token_set_action_async(params) - - mock_store._store_raw.assert_called_once() - - @pytest.mark.asyncio - async def test_set_bearer_token_empty_value(self): - """Test setting bearer token with empty value.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - with patch("getpass.getpass", return_value=""): - mock_manager = Mock() - mock_get_tm.return_value = mock_manager - - params = TokenSetParams( - name="test-bearer", - token_type="bearer", - value=None, - namespace="generic", - ) - - await token_set_action_async(params) - - # Should not store anything - - @pytest.mark.asyncio - async def test_set_api_key_token(self): - """Test setting API key token.""" - with 
patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_store = Mock() - mock_registry = Mock() - mock_manager.token_store = mock_store - mock_manager.registry = mock_registry - mock_get_tm.return_value = mock_manager - - params = TokenSetParams( - name="test-api-key", - token_type="api-key", - value="api-key-123", - provider="custom-provider", - namespace="generic", - ) - - await token_set_action_async(params) - - mock_store._store_raw.assert_called_once() - mock_registry.register.assert_called_once() - - @pytest.mark.asyncio - async def test_set_api_key_token_no_provider(self): - """Test setting API key token without provider.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_get_tm.return_value = mock_manager - - params = TokenSetParams( - name="test-api-key", - token_type="api-key", - value="api-key-123", - provider=None, - namespace="generic", - ) - - await token_set_action_async(params) - - @pytest.mark.asyncio - async def test_set_generic_token(self): - """Test setting generic token.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_store = Mock() - mock_registry = Mock() - mock_manager.token_store = mock_store - mock_manager.registry = mock_registry - mock_get_tm.return_value = mock_manager - - params = TokenSetParams( - name="test-generic", - token_type="generic", - value="generic-token-123", - namespace="custom", - ) - - await token_set_action_async(params) - - mock_store.store_generic.assert_called_once() - mock_registry.register.assert_called_once() - - @pytest.mark.asyncio - async def test_set_unknown_token_type(self): - """Test setting unknown token type.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_get_tm.return_value = mock_manager - - params = TokenSetParams( - name="test-unknown", - 
token_type="unknown-type", - value="token-123", - namespace="generic", - ) - - await token_set_action_async(params) - - @pytest.mark.asyncio - async def test_set_error_handling(self): - """Test error handling in token set.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - side_effect=Exception("Test error"), - ): - params = TokenSetParams( - name="test", - token_type="bearer", - value="token", - namespace="generic", - ) - - with pytest.raises(Exception, match="Test error"): - await token_set_action_async(params) - - -class TestTokenGet: - """Tests for token_get_action_async.""" - - @pytest.mark.asyncio - async def test_get_token_success(self): - """Test getting token successfully.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - from mcp_cli.auth import StoredToken - - mock_manager = Mock() - mock_store = Mock() - - # Create a stored token - stored = StoredToken( - name="test-token", - token_type=TokenType.BEARER, - data={"token": "encrypted-value"}, - metadata={}, - ) - - mock_store._retrieve_raw.return_value = stored.model_dump_json() - mock_manager.token_store = mock_store - mock_get_tm.return_value = mock_manager - - with patch("mcp_cli.commands.actions.token.output"): - await token_get_action_async("test-token", "generic") - - mock_store._retrieve_raw.assert_called_once_with("generic:test-token") - - @pytest.mark.asyncio - async def test_get_token_not_found(self): - """Test getting token that doesn't exist.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_store = Mock() - mock_store._retrieve_raw.return_value = None - mock_manager.token_store = mock_store - mock_get_tm.return_value = mock_manager - - await token_get_action_async("missing-token", "generic") - - @pytest.mark.asyncio - async def test_get_token_parse_error(self): - """Test getting token with parse error.""" - with 
patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_store = Mock() - mock_store._retrieve_raw.return_value = "invalid json" - mock_manager.token_store = mock_store - mock_get_tm.return_value = mock_manager - - await token_get_action_async("test-token", "generic") - - @pytest.mark.asyncio - async def test_get_token_error_handling(self): - """Test error handling in token get.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - side_effect=Exception("Test error"), - ): - with pytest.raises(Exception, match="Test error"): - await token_get_action_async("test-token", "generic") - - -class TestTokenDelete: - """Tests for token_delete_action_async.""" - - @pytest.mark.asyncio - async def test_delete_oauth_token(self): - """Test deleting OAuth token.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_manager.delete_tokens.return_value = True - mock_get_tm.return_value = mock_manager - - params = TokenDeleteParams(name="test-server", oauth=True) - await token_delete_action_async(params) - - mock_manager.delete_tokens.assert_called_once_with("test-server") - - @pytest.mark.asyncio - async def test_delete_oauth_token_not_found(self): - """Test deleting OAuth token that doesn't exist.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_manager.delete_tokens.return_value = False - mock_get_tm.return_value = mock_manager - - params = TokenDeleteParams(name="test-server", oauth=True) - await token_delete_action_async(params) - - @pytest.mark.asyncio - async def test_delete_generic_token_with_namespace(self): - """Test deleting generic token with specific namespace.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_store = Mock() - mock_store.delete_generic.return_value = True - mock_registry = Mock() - 
mock_manager.token_store = mock_store - mock_manager.registry = mock_registry - mock_get_tm.return_value = mock_manager - - params = TokenDeleteParams( - name="test-token", namespace="custom", oauth=False - ) - await token_delete_action_async(params) - - mock_store.delete_generic.assert_called_once_with("test-token", "custom") - mock_registry.unregister.assert_called_once() - - @pytest.mark.asyncio - async def test_delete_generic_token_search_all_namespaces(self): - """Test deleting generic token by searching all namespaces.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_store = Mock() - - # Not found in first namespace, found in second - mock_store.delete_generic.side_effect = [False, True] - mock_registry = Mock() - mock_manager.token_store = mock_store - mock_manager.registry = mock_registry - mock_get_tm.return_value = mock_manager - - params = TokenDeleteParams(name="test-token", namespace=None, oauth=False) - await token_delete_action_async(params) - - @pytest.mark.asyncio - async def test_delete_token_not_found(self): - """Test deleting token that doesn't exist.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_store = Mock() - mock_store.delete_generic.return_value = False - mock_manager.token_store = mock_store - mock_get_tm.return_value = mock_manager - - params = TokenDeleteParams( - name="missing-token", namespace="generic", oauth=False - ) - await token_delete_action_async(params) - - @pytest.mark.asyncio - async def test_delete_error_handling(self): - """Test error handling in token delete.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - side_effect=Exception("Test error"), - ): - params = TokenDeleteParams(name="test-token") - - with pytest.raises(Exception, match="Test error"): - await token_delete_action_async(params) - - -class TestTokenSetProvider: - """Tests for 
token_set_provider_action_async.""" - - @pytest.mark.asyncio - async def test_set_provider_token_with_value(self): - """Test setting provider token with provided value.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - with patch( - "mcp_cli.auth.provider_tokens.set_provider_token", return_value=True - ) as mock_set: - with patch( - "mcp_cli.auth.provider_tokens.get_provider_env_var_name", - return_value="OPENAI_API_KEY", - ): - with patch.dict("os.environ", {}, clear=True): - mock_manager = Mock() - mock_get_tm.return_value = mock_manager - - params = TokenProviderParams( - provider="openai", api_key="sk-test123" - ) - await token_set_provider_action_async(params) - - mock_set.assert_called_once_with( - "openai", "sk-test123", mock_manager - ) - - @pytest.mark.asyncio - async def test_set_provider_token_prompt_for_value(self): - """Test setting provider token with prompted value.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - with patch( - "mcp_cli.auth.provider_tokens.set_provider_token", return_value=True - ): - with patch( - "mcp_cli.auth.provider_tokens.get_provider_env_var_name", - return_value="ANTHROPIC_API_KEY", - ): - with patch("getpass.getpass", return_value="prompted-key"): - with patch.dict("os.environ", {}, clear=True): - mock_manager = Mock() - mock_get_tm.return_value = mock_manager - - params = TokenProviderParams( - provider="anthropic", api_key=None - ) - await token_set_provider_action_async(params) - - @pytest.mark.asyncio - async def test_set_provider_token_empty_value(self): - """Test setting provider token with empty value.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - with patch("getpass.getpass", return_value=""): - mock_manager = Mock() - mock_get_tm.return_value = mock_manager - - params = TokenProviderParams(provider="openai", api_key=None) - await token_set_provider_action_async(params) - - @pytest.mark.asyncio - async 
def test_set_provider_token_with_env_var_set(self): - """Test setting provider token when env var is also set.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - with patch( - "mcp_cli.auth.provider_tokens.set_provider_token", return_value=True - ): - with patch( - "mcp_cli.auth.provider_tokens.get_provider_env_var_name", - return_value="OPENAI_API_KEY", - ): - with patch.dict("os.environ", {"OPENAI_API_KEY": "env-key"}): - mock_manager = Mock() - mock_get_tm.return_value = mock_manager - - params = TokenProviderParams( - provider="openai", api_key="sk-test123" - ) - await token_set_provider_action_async(params) - - @pytest.mark.asyncio - async def test_set_provider_token_failure(self): - """Test provider token set failure.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - with patch( - "mcp_cli.auth.provider_tokens.set_provider_token", return_value=False - ): - mock_manager = Mock() - mock_get_tm.return_value = mock_manager - - params = TokenProviderParams(provider="openai", api_key="sk-test123") - await token_set_provider_action_async(params) - - @pytest.mark.asyncio - async def test_set_provider_error_handling(self): - """Test error handling in set provider.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - side_effect=Exception("Test error"), - ): - params = TokenProviderParams(provider="openai", api_key="sk-test123") - - with pytest.raises(Exception, match="Test error"): - await token_set_provider_action_async(params) - - -class TestTokenGetProvider: - """Tests for token_get_provider_action_async.""" - - @pytest.mark.asyncio - async def test_get_provider_token_configured(self): - """Test getting provider token that is configured.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - with patch( - "mcp_cli.auth.provider_tokens.check_provider_token_status" - ) as mock_check: - mock_manager = Mock() - mock_get_tm.return_value = 
mock_manager - - mock_check.return_value = { - "has_token": True, - "source": "storage", - "env_var": "OPENAI_API_KEY", - "in_env": False, - "in_storage": True, - } - - params = TokenProviderParams(provider="openai") - await token_get_provider_action_async(params) - - @pytest.mark.asyncio - async def test_get_provider_token_not_configured(self): - """Test getting provider token that is not configured.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - with patch( - "mcp_cli.auth.provider_tokens.check_provider_token_status" - ) as mock_check: - mock_manager = Mock() - mock_get_tm.return_value = mock_manager - - mock_check.return_value = { - "has_token": False, - "source": None, - "env_var": "ANTHROPIC_API_KEY", - "in_env": False, - "in_storage": False, - } - - params = TokenProviderParams(provider="anthropic") - await token_get_provider_action_async(params) - - @pytest.mark.asyncio - async def test_get_provider_error_handling(self): - """Test error handling in get provider.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - side_effect=Exception("Test error"), - ): - params = TokenProviderParams(provider="openai") - - with pytest.raises(Exception, match="Test error"): - await token_get_provider_action_async(params) - - -class TestTokenDeleteProvider: - """Tests for token_delete_provider_action_async.""" - - @pytest.mark.asyncio - async def test_delete_provider_token_configured(self): - """Test deleting provider token that is configured.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - with patch( - "mcp_cli.auth.provider_tokens.check_provider_token_status" - ) as mock_check: - mock_manager = Mock() - mock_get_tm.return_value = mock_manager - - mock_check.return_value = { - "has_token": True, - "source": "storage", - "env_var": "OPENAI_API_KEY", - "in_env": False, - "in_storage": True, - } - - params = TokenProviderParams(provider="openai") - await 
token_delete_provider_action_async(params) - - @pytest.mark.asyncio - async def test_delete_provider_token_not_configured(self): - """Test deleting provider token that is not configured.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - with patch( - "mcp_cli.auth.provider_tokens.check_provider_token_status" - ) as mock_check: - mock_manager = Mock() - mock_get_tm.return_value = mock_manager - - mock_check.return_value = { - "has_token": False, - "source": None, - "env_var": "OPENAI_API_KEY", - "in_env": False, - "in_storage": False, - } - - params = TokenProviderParams(provider="openai") - await token_delete_provider_action_async(params) - - @pytest.mark.asyncio - async def test_delete_provider_error_handling(self): - """Test error handling in delete provider.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - side_effect=Exception("Test error"), - ): - params = TokenProviderParams(provider="openai") - - with pytest.raises(Exception, match="Test error"): - await token_delete_provider_action_async(params) - - -class TestTokenClear: - """Tests for token_clear_action_async.""" - - @pytest.mark.asyncio - async def test_clear_tokens_with_force(self): - """Test clearing tokens with force flag.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_store = Mock() - mock_registry = Mock() - - mock_registry.list_tokens.return_value = [ - {"name": "token1", "namespace": "generic"}, - {"name": "token2", "namespace": "generic"}, - ] - mock_store.delete_generic.return_value = True - - mock_manager.token_store = mock_store - mock_manager.registry = mock_registry - mock_get_tm.return_value = mock_manager - - params = TokenClearParams(namespace="generic", force=True) - await token_clear_action_async(params) - - # Should delete both tokens - assert mock_store.delete_generic.call_count == 2 - mock_registry.clear_namespace.assert_called_once_with("generic") - - 
@pytest.mark.asyncio - async def test_clear_all_tokens_with_force(self): - """Test clearing all tokens with force flag.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_store = Mock() - mock_registry = Mock() - - mock_registry.list_tokens.return_value = [ - {"name": "token1", "namespace": "generic"}, - ] - mock_store.delete_generic.return_value = True - - mock_manager.token_store = mock_store - mock_manager.registry = mock_registry - mock_get_tm.return_value = mock_manager - - params = TokenClearParams(namespace=None, force=True) - await token_clear_action_async(params) - - mock_registry.clear_all.assert_called_once() - - @pytest.mark.asyncio - async def test_clear_tokens_cancelled(self): - """Test clearing tokens when user cancels.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - with patch("chuk_term.ui.prompts.confirm", return_value=False): - mock_manager = Mock() - mock_get_tm.return_value = mock_manager - - params = TokenClearParams(namespace="generic", force=False) - await token_clear_action_async(params) - - @pytest.mark.asyncio - async def test_clear_tokens_no_tokens(self): - """Test clearing tokens when none exist.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_registry = Mock() - mock_registry.list_tokens.return_value = [] - mock_manager.registry = mock_registry - mock_get_tm.return_value = mock_manager - - params = TokenClearParams(namespace="generic", force=True) - await token_clear_action_async(params) - - @pytest.mark.asyncio - async def test_clear_tokens_partial_deletion(self): - """Test clearing tokens when some deletions fail.""" - with patch("mcp_cli.commands.actions.token._get_token_manager") as mock_get_tm: - mock_manager = Mock() - mock_store = Mock() - mock_registry = Mock() - - mock_registry.list_tokens.return_value = [ - {"name": "token1", "namespace": "generic"}, - 
{"name": "token2", "namespace": "generic"}, - ] - # First deletion succeeds, second fails - mock_store.delete_generic.side_effect = [True, False] - - mock_manager.token_store = mock_store - mock_manager.registry = mock_registry - mock_get_tm.return_value = mock_manager - - params = TokenClearParams(namespace="generic", force=True) - await token_clear_action_async(params) - - @pytest.mark.asyncio - async def test_clear_error_handling(self): - """Test error handling in token clear.""" - with patch( - "mcp_cli.commands.actions.token._get_token_manager", - side_effect=Exception("Test error"), - ): - params = TokenClearParams(force=True) - - with pytest.raises(Exception, match="Test error"): - await token_clear_action_async(params) - - -class TestTokenBackends: - """Tests for token_backends_action_async.""" - - @pytest.mark.asyncio - async def test_backends_list_with_override(self): - """Test listing backends with CLI override.""" - with patch.dict("os.environ", {"MCP_CLI_TOKEN_BACKEND": "keychain"}): - with patch( - "mcp_cli.commands.actions.token.TokenStoreFactory" - ) as mock_factory: - mock_factory.get_available_backends.return_value = [ - TokenStoreBackend.KEYCHAIN, - TokenStoreBackend.ENCRYPTED_FILE, - ] - mock_factory._detect_backend.return_value = TokenStoreBackend.KEYCHAIN - - await token_backends_action_async() - - @pytest.mark.asyncio - async def test_backends_list_invalid_override(self): - """Test listing backends with invalid CLI override.""" - with patch.dict("os.environ", {"MCP_CLI_TOKEN_BACKEND": "invalid"}): - with patch( - "mcp_cli.commands.actions.token.TokenStoreFactory" - ) as mock_factory: - mock_factory.get_available_backends.return_value = [ - TokenStoreBackend.ENCRYPTED_FILE, - ] - mock_factory._detect_backend.return_value = ( - TokenStoreBackend.ENCRYPTED_FILE - ) - - await token_backends_action_async() - - @pytest.mark.asyncio - async def test_backends_list_no_override(self): - """Test listing backends without CLI override.""" - with 
patch.dict("os.environ", {}, clear=True): - with patch( - "mcp_cli.commands.actions.token.TokenStoreFactory" - ) as mock_factory: - mock_factory.get_available_backends.return_value = [ - TokenStoreBackend.KEYCHAIN, - ] - mock_factory._detect_backend.return_value = TokenStoreBackend.KEYCHAIN - - await token_backends_action_async() - - @pytest.mark.asyncio - async def test_backends_error_handling(self): - """Test error handling in backends list.""" - with patch( - "mcp_cli.commands.actions.token.TokenStoreFactory.get_available_backends", - side_effect=Exception("Test error"), - ): - with pytest.raises(Exception, match="Test error"): - await token_backends_action_async() diff --git a/tests/commands/actions/test_tools_action.py b/tests/commands/actions/test_tools_action.py deleted file mode 100644 index e63b4d21..00000000 --- a/tests/commands/actions/test_tools_action.py +++ /dev/null @@ -1,132 +0,0 @@ -"""Tests for tools action.""" - -import json -from unittest.mock import AsyncMock, MagicMock, patch -import pytest - -from mcp_cli.commands.actions.tools_confirm import tools_action_async, tools_action -from mcp_cli.tools.models import ToolInfo - - -@pytest.fixture -def mock_tool_manager(): - """Create a mock ToolManager.""" - tm = MagicMock() - tm.get_unique_tools = AsyncMock() - return tm - - -@pytest.fixture -def sample_tools(): - """Create sample tool data.""" - return [ - ToolInfo( - name="test_tool", - namespace="test_server", - description="A test tool", - parameters={"type": "object", "properties": {}}, - ), - ToolInfo( - name="another_tool", - namespace="another_server", - description="Another tool", - parameters={"type": "object", "properties": {"arg": {"type": "string"}}}, - ), - ] - - -@pytest.mark.asyncio -async def test_tools_action_async_basic(mock_tool_manager, sample_tools): - """Test basic tools action async.""" - mock_tool_manager.get_unique_tools.return_value = sample_tools - - with ( - patch("mcp_cli.commands.actions.tools_confirm.output") as 
mock_output, - patch( - "mcp_cli.commands.actions.tools_confirm.create_tools_table" - ) as mock_create_table, - ): - mock_table = MagicMock() - mock_create_table.return_value = mock_table - - result = await tools_action_async(mock_tool_manager) - - # Verify output calls - mock_output.info.assert_called_once_with( - "\nFetching tool catalogue from all servers…" - ) - mock_output.print_table.assert_called_once_with(mock_table) - mock_output.success.assert_called_once_with("Total tools available: 2") - - # Verify result structure - assert len(result) == 2 - assert result[0]["name"] == "test_tool" - assert result[0]["namespace"] == "test_server" - assert result[1]["name"] == "another_tool" - - -@pytest.mark.asyncio -async def test_tools_action_async_no_tools(mock_tool_manager): - """Test tools action when no tools available.""" - mock_tool_manager.get_unique_tools.return_value = [] - - with patch("mcp_cli.commands.actions.tools_confirm.output") as mock_output: - result = await tools_action_async(mock_tool_manager) - - mock_output.info.assert_called_once() - mock_output.warning.assert_called_once_with( - "No tools available from any server." 
- ) - assert result == [] - - -@pytest.mark.asyncio -async def test_tools_action_async_raw_mode(mock_tool_manager, sample_tools): - """Test tools action with raw JSON output.""" - mock_tool_manager.get_unique_tools.return_value = sample_tools - - with patch("mcp_cli.commands.actions.tools_confirm.output") as mock_output: - await tools_action_async(mock_tool_manager, show_raw=True) - - # Should call json output instead of table - mock_output.json.assert_called_once() - - # Verify JSON structure was output - call_args = mock_output.json.call_args[0][0] - json_data = json.loads(call_args) - assert len(json_data) == 2 - assert json_data[0]["name"] == "test_tool" - - -@pytest.mark.asyncio -async def test_tools_action_async_show_details(mock_tool_manager, sample_tools): - """Test tools action with show_details=True.""" - mock_tool_manager.get_unique_tools.return_value = sample_tools - - with ( - patch("mcp_cli.commands.actions.tools_confirm.output"), - patch( - "mcp_cli.commands.actions.tools_confirm.create_tools_table" - ) as mock_create_table, - ): - mock_table = MagicMock() - mock_create_table.return_value = mock_table - - await tools_action_async(mock_tool_manager, show_details=True) - - # Verify create_tools_table was called with show_details=True - mock_create_table.assert_called_once_with(sample_tools, show_details=True) - - -def test_tools_action_sync_wrapper(mock_tool_manager): - """Test the sync wrapper function.""" - with patch( - "mcp_cli.commands.actions.tools_confirm.run_blocking" - ) as mock_run_blocking: - mock_run_blocking.return_value = [] - - result = tools_action(mock_tool_manager, show_details=True, show_raw=False) - - # Verify run_blocking was called - mock_run_blocking.assert_called_once() - assert result == [] diff --git a/tests/commands/actions/test_tools_action_improved.py b/tests/commands/actions/test_tools_action_improved.py deleted file mode 100644 index b73aa373..00000000 --- a/tests/commands/actions/test_tools_action_improved.py +++ 
/dev/null @@ -1,394 +0,0 @@ -"""Improved tests for tools action with higher coverage.""" - -import json -from unittest.mock import AsyncMock, MagicMock, patch -import pytest - -from mcp_cli.commands.actions.tools import ( - tools_action_async, - _show_validation_info, -) -from mcp_cli.tools.models import ToolInfo - - -@pytest.fixture -def mock_context(): - """Create a mock application context.""" - context = MagicMock() - context.tool_manager = MagicMock() - return context - - -@pytest.fixture -def mock_tool_manager(): - """Create a mock ToolManager.""" - tm = MagicMock() - tm.get_unique_tools = AsyncMock() - tm.get_adapted_tools_for_llm = AsyncMock() - tm.get_validation_summary = MagicMock(return_value={}) - tm.is_auto_fix_enabled = MagicMock(return_value=True) - return tm - - -@pytest.fixture -def sample_tools(): - """Create sample tool data.""" - return [ - ToolInfo( - name="test_tool", - namespace="test_server", - description="A test tool", - parameters={"type": "object", "properties": {}}, - ), - ToolInfo( - name="another_tool", - namespace="another_server", - description="Another tool", - parameters={"type": "object", "properties": {"arg": {"type": "string"}}}, - ), - ] - - -@pytest.fixture -def adapted_tools(): - """Create adapted tool definitions for LLM.""" - return [ - { - "function": { - "name": "test_server_test_tool", - "description": "A test tool", - "parameters": {"type": "object", "properties": {}}, - } - }, - { - "function": { - "name": "another_tool", - "description": "Another tool", - "parameters": { - "type": "object", - "properties": {"arg": {"type": "string"}}, - }, - } - }, - ] - - -@pytest.mark.asyncio -async def test_tools_action_async_no_tool_manager(mock_context): - """Test tools action when tool manager is not available.""" - mock_context.tool_manager = None - - with patch("mcp_cli.commands.actions.tools.get_context", return_value=mock_context): - with patch("mcp_cli.commands.actions.tools.output") as mock_output: - result = await 
tools_action_async() - - mock_output.error.assert_called_with("No tool manager available") - assert result == [] - - -@pytest.mark.asyncio -async def test_tools_action_async_basic(mock_context, mock_tool_manager, sample_tools): - """Test basic tools action async.""" - mock_tool_manager.get_unique_tools.return_value = sample_tools - - with patch("mcp_cli.commands.actions.tools.get_context", return_value=mock_context): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools.output") as mock_output: - with patch( - "mcp_cli.commands.actions.tools.create_tools_table" - ) as mock_create_table: - mock_table = MagicMock() - mock_create_table.return_value = mock_table - - result = await tools_action_async() - - # Verify output calls - mock_output.info.assert_called_with( - "\nFetching tool catalogue from all servers…" - ) - mock_output.print_table.assert_called_once_with(mock_table) - mock_output.success.assert_called_with("Total tools available: 2") - - # Verify result structure - assert len(result) == 2 - assert result[0]["name"] == "test_tool" - assert result[0]["namespace"] == "test_server" - assert result[1]["name"] == "another_tool" - - -@pytest.mark.asyncio -async def test_tools_action_async_with_validation(mock_context, mock_tool_manager): - """Test tools action with validation flag.""" - mock_tool_manager.get_validation_summary.return_value = { - "total_tools": 10, - "valid_tools": 8, - "invalid_tools": 2, - "disabled_by_user": 1, - "disabled_by_validation": 1, - "validation_errors": [ - {"tool": "bad_tool", "error": "Invalid schema", "reason": "schema_error"} - ], - "disabled_tools": {"disabled_tool": "User disabled"}, - } - - with patch("mcp_cli.commands.actions.tools.get_context", return_value=mock_context): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools.output") as mock_output: - with patch( - "mcp_cli.commands.actions.tools.format_table" - ) as mock_format_table: - 
mock_format_table.return_value = "formatted_table" - - result = await tools_action_async(show_validation=True) - - mock_output.info.assert_any_call("Tool Validation Report for openai") - mock_output.print_table.assert_called() - mock_output.error.assert_called() # For validation errors - mock_output.warning.assert_called() # For disabled tools - - assert result == [ - { - "validation_summary": mock_tool_manager.get_validation_summary.return_value - } - ] - - -@pytest.mark.asyncio -async def test_tools_action_async_adapted_tools( - mock_context, mock_tool_manager, adapted_tools -): - """Test tools action with adapted tools for LLM.""" - mock_tool_manager.get_adapted_tools_for_llm.return_value = (adapted_tools, None) - mock_tool_manager.get_validation_summary.return_value = { - "invalid_tools": 2, - "total_tools": 4, - } - - with patch("mcp_cli.commands.actions.tools.get_context", return_value=mock_context): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools.output") as mock_output: - with patch( - "mcp_cli.commands.actions.tools.create_tools_table" - ) as mock_create_table: - mock_table = MagicMock() - mock_create_table.return_value = mock_table - - result = await tools_action_async() - - # Should call get_adapted_tools_for_llm - mock_tool_manager.get_adapted_tools_for_llm.assert_called_with("openai") - - # Should show validation note - assert any( - "2 tools filtered out" in str(call) - for call in mock_output.print.call_args_list - ) - mock_output.hint.assert_called_with( - "Use --validation flag to see details" - ) - - assert len(result) == 2 - - -@pytest.mark.asyncio -async def test_tools_action_async_adapted_tools_error_fallback( - mock_context, mock_tool_manager, sample_tools -): - """Test tools action falls back when adapted tools fail.""" - mock_tool_manager.get_adapted_tools_for_llm.side_effect = Exception("Adapter error") - mock_tool_manager.get_unique_tools.return_value = sample_tools - - with 
patch("mcp_cli.commands.actions.tools.get_context", return_value=mock_context): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools.output"): - with patch( - "mcp_cli.commands.actions.tools.create_tools_table" - ) as mock_create_table: - mock_create_table.return_value = MagicMock() - - result = await tools_action_async() - - # Should fall back to get_unique_tools - mock_tool_manager.get_unique_tools.assert_called_once() - assert len(result) == 2 - - -@pytest.mark.asyncio -async def test_tools_action_async_no_tools(mock_context, mock_tool_manager): - """Test tools action when no tools available.""" - mock_tool_manager.get_unique_tools.return_value = [] - - with patch("mcp_cli.commands.actions.tools.get_context", return_value=mock_context): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools.output") as mock_output: - result = await tools_action_async() - - mock_output.info.assert_called_once() - mock_output.warning.assert_called_with( - "No tools available from any server." 
- ) - assert result == [] - - -@pytest.mark.asyncio -async def test_tools_action_async_raw_mode( - mock_context, mock_tool_manager, sample_tools -): - """Test tools action with raw JSON output.""" - mock_tool_manager.get_unique_tools.return_value = sample_tools - - with patch("mcp_cli.commands.actions.tools.get_context", return_value=mock_context): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools.output") as mock_output: - await tools_action_async(show_raw=True) - - # Should call json output instead of table - mock_output.json.assert_called_once() - - # Verify JSON structure was output - call_args = mock_output.json.call_args[0][0] - json_data = json.loads(call_args) - assert len(json_data) == 2 - assert json_data[0]["name"] == "test_tool" - - -@pytest.mark.asyncio -async def test_tools_action_async_show_details( - mock_context, mock_tool_manager, sample_tools -): - """Test tools action with show_details=True.""" - mock_tool_manager.get_unique_tools.return_value = sample_tools - - with patch("mcp_cli.commands.actions.tools.get_context", return_value=mock_context): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools.output"): - with patch( - "mcp_cli.commands.actions.tools.create_tools_table" - ) as mock_create_table: - mock_table = MagicMock() - mock_create_table.return_value = mock_table - - await tools_action_async(show_details=True) - - # Verify create_tools_table was called with show_details=True - mock_create_table.assert_called_once_with( - sample_tools, show_details=True - ) - - -@pytest.mark.asyncio -async def test_tools_action_async_with_provider( - mock_context, mock_tool_manager, adapted_tools -): - """Test tools action with specific provider.""" - mock_tool_manager.get_adapted_tools_for_llm.return_value = (adapted_tools, None) - - with patch("mcp_cli.commands.actions.tools.get_context", return_value=mock_context): - mock_context.tool_manager = mock_tool_manager - - 
with patch("mcp_cli.commands.actions.tools.output"): - with patch("mcp_cli.commands.actions.tools.create_tools_table"): - await tools_action_async(provider="anthropic") - - # Should use specified provider - mock_tool_manager.get_adapted_tools_for_llm.assert_called_with( - "anthropic" - ) - - -@pytest.mark.asyncio -async def test_show_validation_info_no_validation(mock_tool_manager): - """Test validation info when validation not available.""" - # Remove validation methods - delattr(mock_tool_manager, "get_validation_summary") - - with patch("mcp_cli.commands.actions.tools.output") as mock_output: - result = await _show_validation_info(mock_tool_manager, "openai") - - mock_output.print.assert_called_with( - "Validation not available - using basic ToolManager" - ) - assert result == [] - - -@pytest.mark.asyncio -async def test_show_validation_info_with_errors(mock_tool_manager): - """Test validation info display with errors.""" - mock_tool_manager.get_validation_summary.return_value = { - "total_tools": 15, - "valid_tools": 10, - "invalid_tools": 5, - "disabled_by_user": 2, - "disabled_by_validation": 3, - "validation_errors": [ - {"tool": f"bad_tool_{i}", "error": f"Error {i}", "reason": f"reason_{i}"} - for i in range(15) - ], - "disabled_tools": {"tool1": "User disabled", "tool2": "Validation failed"}, - } - - with patch("mcp_cli.commands.actions.tools.output") as mock_output: - with patch("mcp_cli.commands.actions.tools.format_table") as mock_format_table: - mock_format_table.return_value = "formatted_table" - - result = await _show_validation_info(mock_tool_manager, "openai") - - # Should show validation summary - mock_output.info.assert_any_call("Tool Validation Report for openai") - - # Should show errors (first 10) - mock_output.error.assert_called() - - # Should show "more errors" message - assert any( - "and 5 more errors" in str(call) - for call in mock_output.info.call_args_list - ) - - # Should show disabled tools - mock_output.warning.assert_called() - - 
# Should show auto-fix status - assert any( - "Auto-fix: Enabled" in str(call) - for call in mock_output.info.call_args_list - ) - - # Should show commands - assert mock_output.print.call_count >= 5 # Multiple command prints - - assert result == [ - { - "validation_summary": mock_tool_manager.get_validation_summary.return_value - } - ] - - -@pytest.mark.asyncio -async def test_show_validation_info_long_error_messages(mock_tool_manager): - """Test validation info truncates long error messages.""" - long_error = "A" * 100 - mock_tool_manager.get_validation_summary.return_value = { - "total_tools": 1, - "valid_tools": 0, - "invalid_tools": 1, - "disabled_by_user": 0, - "disabled_by_validation": 0, - "validation_errors": [ - {"tool": "bad_tool", "error": long_error, "reason": "too_long"} - ], - "disabled_tools": {}, - } - - with patch("mcp_cli.commands.actions.tools.output"): - with patch("mcp_cli.commands.actions.tools.format_table"): - await _show_validation_info(mock_tool_manager, "openai") - - # Test passes diff --git a/tests/commands/actions/test_tools_call_action.py b/tests/commands/actions/test_tools_call_action.py deleted file mode 100644 index 1559ca11..00000000 --- a/tests/commands/actions/test_tools_call_action.py +++ /dev/null @@ -1,359 +0,0 @@ -"""Tests for the tools_call action command.""" - -import pytest -from unittest.mock import AsyncMock, MagicMock, patch - -from mcp_cli.commands.actions.tools_call import tools_call_action -from mcp_cli.tools.models import ToolInfo, ToolCallResult - - -@pytest.fixture -def mock_context(): - """Create a mock application context.""" - context = MagicMock() - context.tool_manager = MagicMock() - return context - - -@pytest.fixture -def mock_tool_manager(): - """Create a mock tool manager.""" - tm = MagicMock() - tm.get_unique_tools = AsyncMock() - tm.execute_tool = AsyncMock() - return tm - - -@pytest.fixture -def sample_tools(): - """Create sample tool data.""" - return [ - ToolInfo( - name="test_tool", - 
namespace="test_server", - description="A test tool", - parameters={"type": "object", "properties": {"arg1": {"type": "string"}}}, - ), - ToolInfo( - name="no_args_tool", - namespace="test_server", - description="Tool with no arguments", - parameters={"type": "object"}, - ), - ToolInfo( - name="no_desc_tool", - namespace="other_server", - description=None, - parameters={"type": "object", "properties": {}}, - ), - ] - - -@pytest.mark.asyncio -async def test_tools_call_action_no_tool_manager(mock_context): - """Test tools call when tool manager is not available.""" - mock_context.tool_manager = None - - with patch( - "mcp_cli.commands.actions.tools_call.get_context", return_value=mock_context - ): - with patch("mcp_cli.commands.actions.tools_call.output") as mock_output: - await tools_call_action() - - mock_output.print.assert_any_call( - "[red]Error:[/red] No tool manager available" - ) - - -@pytest.mark.asyncio -async def test_tools_call_action_no_tools(mock_context, mock_tool_manager): - """Test tools call when no tools are available.""" - mock_tool_manager.get_unique_tools.return_value = [] - - with patch( - "mcp_cli.commands.actions.tools_call.get_context", return_value=mock_context - ): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools_call.output") as mock_output: - await tools_call_action() - - mock_output.print.assert_any_call( - "[yellow]No tools available from any server.[/yellow]" - ) - - -@pytest.mark.asyncio -async def test_tools_call_action_successful_call_with_args( - mock_context, mock_tool_manager, sample_tools -): - """Test successful tool call with arguments.""" - mock_tool_manager.get_unique_tools.return_value = sample_tools - mock_result = ToolCallResult( - tool_name="test_tool", - success=True, - result={"data": "test_result"}, - ) - mock_result.duration_ms = 100 - mock_tool_manager.execute_tool.return_value = mock_result - - with patch( - "mcp_cli.commands.actions.tools_call.get_context", 
return_value=mock_context - ): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools_call.output") as mock_output: - with patch("asyncio.to_thread") as mock_to_thread: - # Simulate user selecting tool 1 and providing JSON arguments - mock_to_thread.side_effect = ["1", '{"arg1": "value1"}'] - - with patch( - "mcp_cli.commands.actions.tools_call.display_tool_call_result" - ) as mock_display: - await tools_call_action() - - # Verify tool list was displayed - mock_output.print.assert_any_call("[green]Available tools:[/green]") - - # Verify tool was executed with correct arguments - mock_tool_manager.execute_tool.assert_called_once_with( - "test_server.test_tool", {"arg1": "value1"} - ) - - # Verify result was displayed - mock_display.assert_called_once_with(mock_result) - - -@pytest.mark.asyncio -async def test_tools_call_action_tool_no_args( - mock_context, mock_tool_manager, sample_tools -): - """Test tool call for tool with no arguments.""" - mock_tool_manager.get_unique_tools.return_value = sample_tools - mock_result = ToolCallResult( - tool_name="no_args_tool", - success=True, - result={"data": "success"}, - ) - mock_result.duration_ms = 100 - mock_tool_manager.execute_tool.return_value = mock_result - - with patch( - "mcp_cli.commands.actions.tools_call.get_context", return_value=mock_context - ): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools_call.output") as mock_output: - with patch("asyncio.to_thread") as mock_to_thread: - # Select tool 2 (no_args_tool) - mock_to_thread.return_value = "2" - - with patch( - "mcp_cli.commands.actions.tools_call.display_tool_call_result" - ): - await tools_call_action() - - # Should show "Tool takes no arguments" - mock_output.print.assert_any_call( - "[dim]Tool takes no arguments.[/dim]" - ) - - # Should execute with empty args - mock_tool_manager.execute_tool.assert_called_once_with( - "test_server.no_args_tool", {} - ) - - 
-@pytest.mark.asyncio -async def test_tools_call_action_invalid_selection( - mock_context, mock_tool_manager, sample_tools -): - """Test invalid tool selection.""" - mock_tool_manager.get_unique_tools.return_value = sample_tools - - with patch( - "mcp_cli.commands.actions.tools_call.get_context", return_value=mock_context - ): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools_call.output") as mock_output: - with patch("asyncio.to_thread") as mock_to_thread: - # Invalid selection - mock_to_thread.return_value = "99" - - await tools_call_action() - - mock_output.print.assert_any_call("[red]Invalid selection.[/red]") - mock_tool_manager.execute_tool.assert_not_called() - - -@pytest.mark.asyncio -async def test_tools_call_action_non_numeric_selection( - mock_context, mock_tool_manager, sample_tools -): - """Test non-numeric tool selection.""" - mock_tool_manager.get_unique_tools.return_value = sample_tools - - with patch( - "mcp_cli.commands.actions.tools_call.get_context", return_value=mock_context - ): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools_call.output") as mock_output: - with patch("asyncio.to_thread") as mock_to_thread: - # Non-numeric selection - mock_to_thread.return_value = "abc" - - await tools_call_action() - - mock_output.print.assert_any_call("[red]Invalid selection.[/red]") - mock_tool_manager.execute_tool.assert_not_called() - - -@pytest.mark.asyncio -async def test_tools_call_action_invalid_json_args( - mock_context, mock_tool_manager, sample_tools -): - """Test invalid JSON arguments.""" - mock_tool_manager.get_unique_tools.return_value = sample_tools - - with patch( - "mcp_cli.commands.actions.tools_call.get_context", return_value=mock_context - ): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools_call.output") as mock_output: - with patch("asyncio.to_thread") as mock_to_thread: - # Select tool 1, provide 
invalid JSON - mock_to_thread.side_effect = ["1", "not valid json"] - - await tools_call_action() - - mock_output.print.assert_any_call("[red]Invalid JSON - aborting.[/red]") - mock_tool_manager.execute_tool.assert_not_called() - - -@pytest.mark.asyncio -async def test_tools_call_action_empty_args_input( - mock_context, mock_tool_manager, sample_tools -): - """Test empty arguments input (should use empty dict).""" - mock_tool_manager.get_unique_tools.return_value = sample_tools - mock_result = ToolCallResult( - tool_name="test_tool", - success=True, - result={"data": "success"}, - ) - mock_result.duration_ms = 100 - mock_tool_manager.execute_tool.return_value = mock_result - - with patch( - "mcp_cli.commands.actions.tools_call.get_context", return_value=mock_context - ): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools_call.output"): - with patch("asyncio.to_thread") as mock_to_thread: - # Select tool 1, provide empty string for args - mock_to_thread.side_effect = ["1", " "] - - with patch( - "mcp_cli.commands.actions.tools_call.display_tool_call_result" - ): - await tools_call_action() - - # Should execute with empty args - mock_tool_manager.execute_tool.assert_called_once_with( - "test_server.test_tool", {} - ) - - -@pytest.mark.asyncio -async def test_tools_call_action_execution_error( - mock_context, mock_tool_manager, sample_tools -): - """Test error during tool execution.""" - mock_tool_manager.get_unique_tools.return_value = sample_tools - mock_tool_manager.execute_tool.side_effect = Exception("Execution failed") - - with patch( - "mcp_cli.commands.actions.tools_call.get_context", return_value=mock_context - ): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools_call.output") as mock_output: - with patch("asyncio.to_thread") as mock_to_thread: - # Select tool 1 - mock_to_thread.side_effect = ["1", ""] - - await tools_call_action() - - 
mock_output.print.assert_any_call("[red]Error: Execution failed[/red]") - - -@pytest.mark.asyncio -async def test_tools_call_action_tool_no_description( - mock_context, mock_tool_manager, sample_tools -): - """Test displaying tool with no description.""" - mock_tool_manager.get_unique_tools.return_value = sample_tools - - with patch( - "mcp_cli.commands.actions.tools_call.get_context", return_value=mock_context - ): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools_call.output") as mock_output: - with patch("asyncio.to_thread") as mock_to_thread: - # Select tool 3 (no_desc_tool) then cancel - mock_to_thread.return_value = "99" # Invalid to exit - - await tools_call_action() - - # Should show "No description" for tool 3 - calls = [str(call) for call in mock_output.print.call_args_list] - assert any( - "no_desc_tool" in str(call) and "No description" in str(call) - for call in calls - ) - - -@pytest.mark.asyncio -async def test_tools_call_action_displays_selected_tool_info( - mock_context, mock_tool_manager, sample_tools -): - """Test that selected tool information is displayed.""" - mock_tool_manager.get_unique_tools.return_value = sample_tools - mock_result = ToolCallResult( - tool_name="test_tool", - success=True, - result={"data": "success"}, - ) - mock_result.duration_ms = 100 - mock_tool_manager.execute_tool.return_value = mock_result - - with patch( - "mcp_cli.commands.actions.tools_call.get_context", return_value=mock_context - ): - mock_context.tool_manager = mock_tool_manager - - with patch("mcp_cli.commands.actions.tools_call.output") as mock_output: - with patch("asyncio.to_thread") as mock_to_thread: - # Select tool 1 - mock_to_thread.side_effect = ["1", ""] - - with patch( - "mcp_cli.commands.actions.tools_call.display_tool_call_result" - ): - await tools_call_action() - - # Should display selected tool info - # Check that the tool info was displayed (exact format may vary) - assert any( - "test_tool" in 
str(call) - for call in mock_output.print.call_args_list - ) - assert any( - "test_server" in str(call) or "A test tool" in str(call) - for call in mock_output.print.call_args_list - ) diff --git a/tests/commands/actions/test_tools_manage_action.py b/tests/commands/actions/test_tools_manage_action.py deleted file mode 100644 index 99d2d5fb..00000000 --- a/tests/commands/actions/test_tools_manage_action.py +++ /dev/null @@ -1,385 +0,0 @@ -"""Tests for tools manage action.""" - -from unittest.mock import AsyncMock, MagicMock, patch -import pytest - -from mcp_cli.commands.actions.tools_manage import ( - tools_manage_action_async, - tools_manage_action, -) - - -@pytest.fixture -def mock_tool_manager(): - """Create a mock ToolManager.""" - tm = MagicMock() - tm.enable_tool = MagicMock() - tm.disable_tool = MagicMock() - tm.validate_single_tool = AsyncMock() - tm.revalidate_tools = AsyncMock() - tm.get_validation_summary = MagicMock() - tm.get_disabled_tools = MagicMock() - tm.get_tool_validation_details = MagicMock() - tm.set_auto_fix_enabled = MagicMock() - tm.clear_validation_disabled_tools = MagicMock() - return tm - - -@pytest.mark.asyncio -async def test_tools_manage_enable_action(mock_tool_manager): - """Test enable action.""" - with patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async( - mock_tool_manager, "enable", "test_tool" - ) - - mock_tool_manager.enable_tool.assert_called_once_with("test_tool") - mock_output.success.assert_called_once_with("✓ Enabled tool: test_tool") - - assert result == {"success": True, "action": "enable", "tool": "test_tool"} - - -@pytest.mark.asyncio -async def test_tools_manage_enable_no_tool_name(mock_tool_manager): - """Test enable action without tool name.""" - with patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async(mock_tool_manager, "enable") - - mock_output.error.assert_called_once_with( - "Tool name 
required for enable action" - ) - mock_tool_manager.enable_tool.assert_not_called() - - assert result == {"success": False, "error": "Tool name required"} - - -@pytest.mark.asyncio -async def test_tools_manage_disable_action(mock_tool_manager): - """Test disable action.""" - with patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async( - mock_tool_manager, "disable", "test_tool" - ) - - mock_tool_manager.disable_tool.assert_called_once_with( - "test_tool", reason="user" - ) - mock_output.warning.assert_called_once_with("✗ Disabled tool: test_tool") - - assert result == {"success": True, "action": "disable", "tool": "test_tool"} - - -@pytest.mark.asyncio -async def test_tools_manage_disable_no_tool_name(mock_tool_manager): - """Test disable action without tool name.""" - with patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async(mock_tool_manager, "disable") - - mock_output.error.assert_called_once_with( - "Tool name required for disable action" - ) - mock_tool_manager.disable_tool.assert_not_called() - - assert result == {"success": False, "error": "Tool name required"} - - -@pytest.mark.asyncio -async def test_tools_manage_validate_single_tool_valid(mock_tool_manager): - """Test validate action for single valid tool.""" - mock_tool_manager.validate_single_tool.return_value = (True, None) - - with patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async( - mock_tool_manager, "validate", "test_tool" - ) - - mock_tool_manager.validate_single_tool.assert_called_once_with("test_tool") - mock_output.success.assert_called_once_with("✓ Tool 'test_tool' is valid") - - assert result == { - "success": True, - "action": "validate", - "tool": "test_tool", - "is_valid": True, - "error": None, - } - - -@pytest.mark.asyncio -async def test_tools_manage_validate_single_tool_invalid(mock_tool_manager): - """Test 
validate action for single invalid tool.""" - mock_tool_manager.validate_single_tool.return_value = (False, "Invalid schema") - - with patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async( - mock_tool_manager, "validate", "test_tool" - ) - - mock_tool_manager.validate_single_tool.assert_called_once_with("test_tool") - mock_output.error.assert_called_once_with( - "✗ Tool 'test_tool' is invalid: Invalid schema" - ) - - assert result == { - "success": True, - "action": "validate", - "tool": "test_tool", - "is_valid": False, - "error": "Invalid schema", - } - - -@pytest.mark.asyncio -async def test_tools_manage_validate_all_tools(mock_tool_manager): - """Test validate action for all tools.""" - summary = {"total_tools": 5, "valid_tools": 4, "invalid_tools": 1} - mock_tool_manager.revalidate_tools.return_value = summary - - with patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async( - mock_tool_manager, "validate", provider="anthropic" - ) - - mock_tool_manager.revalidate_tools.assert_called_once_with("anthropic") - mock_output.info.assert_called_once_with( - "Validating all tools for anthropic..." 
- ) - mock_output.success.assert_called_once_with("Validation complete:") - - assert result == {"success": True, "action": "validate_all", "summary": summary} - - -@pytest.mark.asyncio -async def test_tools_manage_status_action(mock_tool_manager): - """Test status action.""" - summary = { - "total_tools": 10, - "valid_tools": 8, - "invalid_tools": 2, - "disabled_by_user": 1, - "disabled_by_validation": 1, - "auto_fix_enabled": True, - "provider": "openai", - } - mock_tool_manager.get_validation_summary.return_value = summary - - with ( - patch("mcp_cli.commands.actions.tools_manage.output") as mock_output, - patch( - "mcp_cli.commands.actions.tools_manage.format_table" - ) as mock_format_table, - ): - mock_table = MagicMock() - mock_format_table.return_value = mock_table - - result = await tools_manage_action_async(mock_tool_manager, "status") - - mock_output.print_table.assert_called_once_with(mock_table) - - assert result == {"success": True, "action": "status", "summary": summary} - - -@pytest.mark.asyncio -async def test_tools_manage_list_disabled_with_tools(mock_tool_manager): - """Test list-disabled action with disabled tools.""" - disabled_tools = {"tool1": "user", "tool2": "validation"} - mock_tool_manager.get_disabled_tools.return_value = disabled_tools - - with ( - patch("mcp_cli.commands.actions.tools_manage.output") as mock_output, - patch( - "mcp_cli.commands.actions.tools_manage.format_table" - ) as mock_format_table, - ): - mock_table = MagicMock() - mock_format_table.return_value = mock_table - - result = await tools_manage_action_async(mock_tool_manager, "list-disabled") - - mock_output.print_table.assert_called_once_with(mock_table) - - # Verify table data - table_data = mock_format_table.call_args[0][0] - assert len(table_data) == 2 - assert {"Tool Name": "tool1", "Reason": "user"} in table_data - assert {"Tool Name": "tool2", "Reason": "validation"} in table_data - - assert result == { - "success": True, - "action": "list_disabled", - 
"disabled_tools": disabled_tools, - } - - -@pytest.mark.asyncio -async def test_tools_manage_list_disabled_no_tools(mock_tool_manager): - """Test list-disabled action with no disabled tools.""" - mock_tool_manager.get_disabled_tools.return_value = {} - - with patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async(mock_tool_manager, "list-disabled") - - mock_output.success.assert_called_once_with("No disabled tools") - - assert result == { - "success": True, - "action": "list_disabled", - "disabled_tools": {}, - } - - -@pytest.mark.asyncio -async def test_tools_manage_details_action(mock_tool_manager): - """Test details action.""" - details = { - "is_enabled": True, - "disabled_reason": None, - "validation_error": None, - "can_auto_fix": False, - } - mock_tool_manager.get_tool_validation_details.return_value = details - - with patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async( - mock_tool_manager, "details", "test_tool" - ) - - mock_tool_manager.get_tool_validation_details.assert_called_once_with( - "test_tool" - ) - mock_output.panel.assert_called_once() - - assert result == { - "success": True, - "action": "details", - "tool": "test_tool", - "details": details, - } - - -@pytest.mark.asyncio -async def test_tools_manage_details_no_tool_name(mock_tool_manager): - """Test details action without tool name.""" - with patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async(mock_tool_manager, "details") - - mock_output.error.assert_called_once_with( - "Tool name required for details action" - ) - - assert result == {"success": False, "error": "Tool name required"} - - -@pytest.mark.asyncio -async def test_tools_manage_details_tool_not_found(mock_tool_manager): - """Test details action with non-existent tool.""" - mock_tool_manager.get_tool_validation_details.return_value = None - - with 
patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async( - mock_tool_manager, "details", "nonexistent" - ) - - mock_output.error.assert_called_once_with("Tool 'nonexistent' not found") - - assert result == {"success": False, "error": "Tool not found"} - - -@pytest.mark.asyncio -async def test_tools_manage_auto_fix_enable(mock_tool_manager): - """Test auto-fix enable action.""" - with patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async( - mock_tool_manager, "auto-fix", enabled=True - ) - - mock_tool_manager.set_auto_fix_enabled.assert_called_once_with(True) - mock_output.info.assert_called_once_with("Auto-fix enabled") - - assert result == {"success": True, "action": "auto_fix", "enabled": True} - - -@pytest.mark.asyncio -async def test_tools_manage_auto_fix_disable(mock_tool_manager): - """Test auto-fix disable action.""" - with patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async( - mock_tool_manager, "auto-fix", enabled=False - ) - - mock_tool_manager.set_auto_fix_enabled.assert_called_once_with(False) - mock_output.info.assert_called_once_with("Auto-fix disabled") - - assert result == {"success": True, "action": "auto_fix", "enabled": False} - - -@pytest.mark.asyncio -async def test_tools_manage_clear_validation(mock_tool_manager): - """Test clear-validation action.""" - with patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async(mock_tool_manager, "clear-validation") - - mock_tool_manager.clear_validation_disabled_tools.assert_called_once() - mock_output.success.assert_called_once_with( - "Cleared all validation-disabled tools" - ) - - assert result == {"success": True, "action": "clear_validation"} - - -@pytest.mark.asyncio -async def test_tools_manage_validation_errors_with_errors(mock_tool_manager): - """Test validation-errors 
action with errors.""" - errors = [ - {"tool": "tool1", "error": "Invalid schema"}, - {"tool": "tool2", "error": "Missing parameter"}, - ] - summary = {"validation_errors": errors} - mock_tool_manager.get_validation_summary.return_value = summary - - with patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async(mock_tool_manager, "validation-errors") - - mock_output.error.assert_called_once_with("Found 2 validation errors:") - - assert result == { - "success": True, - "action": "validation_errors", - "errors": errors, - } - - -@pytest.mark.asyncio -async def test_tools_manage_validation_errors_no_errors(mock_tool_manager): - """Test validation-errors action with no errors.""" - summary = {"validation_errors": []} - mock_tool_manager.get_validation_summary.return_value = summary - - with patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async(mock_tool_manager, "validation-errors") - - mock_output.success.assert_called_once_with("No validation errors") - - assert result == {"success": True, "action": "validation_errors", "errors": []} - - -@pytest.mark.asyncio -async def test_tools_manage_unknown_action(mock_tool_manager): - """Test unknown action.""" - with patch("mcp_cli.commands.actions.tools_manage.output") as mock_output: - result = await tools_manage_action_async(mock_tool_manager, "unknown") - - mock_output.error.assert_called_once_with("Unknown action: unknown") - - assert result == {"success": False, "error": "Unknown action: unknown"} - - -def test_tools_manage_action_sync_wrapper(mock_tool_manager): - """Test the sync wrapper.""" - with patch("mcp_cli.commands.actions.tools_manage.asyncio.run") as mock_run: - mock_run.return_value = {"success": True} - - result = tools_manage_action(mock_tool_manager, "status", "tool") - - mock_run.assert_called_once() - assert result == {"success": True} diff --git 
a/tests/commands/definitions/test_clear_command.py b/tests/commands/definitions/test_clear_command.py index e7ee02e7..6c253090 100644 --- a/tests/commands/definitions/test_clear_command.py +++ b/tests/commands/definitions/test_clear_command.py @@ -2,7 +2,7 @@ import pytest from unittest.mock import patch -from mcp_cli.commands.definitions.clear import ClearCommand +from mcp_cli.commands.core.clear import ClearCommand class TestClearCommand: diff --git a/tests/commands/definitions/test_clear_command_extended.py b/tests/commands/definitions/test_clear_command_extended.py index 18739bd2..f5731328 100644 --- a/tests/commands/definitions/test_clear_command_extended.py +++ b/tests/commands/definitions/test_clear_command_extended.py @@ -2,7 +2,7 @@ import pytest from unittest.mock import patch, MagicMock -from mcp_cli.commands.definitions.clear import ClearCommand +from mcp_cli.commands.core.clear import ClearCommand class TestClearCommandExtended: diff --git a/tests/commands/definitions/test_conversation_command.py b/tests/commands/definitions/test_conversation_command.py index 4c00b96e..d8b08ef0 100644 --- a/tests/commands/definitions/test_conversation_command.py +++ b/tests/commands/definitions/test_conversation_command.py @@ -2,7 +2,7 @@ import pytest from unittest.mock import patch, Mock -from mcp_cli.commands.definitions.conversation import ConversationCommand +from mcp_cli.commands.conversation.conversation import ConversationCommand from mcp_cli.commands.base import CommandMode from mcp_cli.chat.models import Message, MessageRole diff --git a/tests/commands/definitions/test_conversation_command_extended.py b/tests/commands/definitions/test_conversation_command_extended.py index 6cd8218e..a8894cd8 100644 --- a/tests/commands/definitions/test_conversation_command_extended.py +++ b/tests/commands/definitions/test_conversation_command_extended.py @@ -3,7 +3,7 @@ import pytest from unittest.mock import MagicMock, patch -from mcp_cli.commands.definitions.conversation 
import ConversationCommand +from mcp_cli.commands.conversation.conversation import ConversationCommand from mcp_cli.commands.base import CommandMode from mcp_cli.chat.models import Message, MessageRole diff --git a/tests/commands/definitions/test_conversation_extended_coverage.py b/tests/commands/definitions/test_conversation_extended_coverage.py index d4b52a33..aa6729ea 100644 --- a/tests/commands/definitions/test_conversation_extended_coverage.py +++ b/tests/commands/definitions/test_conversation_extended_coverage.py @@ -3,7 +3,7 @@ import pytest from unittest.mock import MagicMock, patch -from mcp_cli.commands.definitions.conversation import ConversationCommand +from mcp_cli.commands.conversation.conversation import ConversationCommand from mcp_cli.chat.models import Message, MessageRole diff --git a/tests/commands/definitions/test_conversation_load.py b/tests/commands/definitions/test_conversation_load.py index 2cfd84f6..be1f96a4 100644 --- a/tests/commands/definitions/test_conversation_load.py +++ b/tests/commands/definitions/test_conversation_load.py @@ -3,7 +3,7 @@ import pytest from unittest.mock import MagicMock, patch -from mcp_cli.commands.definitions.conversation import ConversationCommand +from mcp_cli.commands.conversation.conversation import ConversationCommand from mcp_cli.chat.models import Message, MessageRole diff --git a/tests/commands/definitions/test_coverage_improvements.py b/tests/commands/definitions/test_coverage_improvements.py index 7cc8ef0a..5201f12a 100644 --- a/tests/commands/definitions/test_coverage_improvements.py +++ b/tests/commands/definitions/test_coverage_improvements.py @@ -3,12 +3,12 @@ import pytest from unittest.mock import patch -from mcp_cli.commands.definitions.providers import ( +from mcp_cli.commands.providers.providers import ( ProviderCommand, ProviderSetCommand, ProviderShowCommand, ) -from mcp_cli.commands.definitions.server_singular import ServerSingularCommand +from mcp_cli.commands.servers.server_singular 
import ServerSingularCommand # Removed help tests that were not working properly @@ -33,35 +33,35 @@ def show_command(self): @pytest.mark.asyncio async def test_provider_direct_switch(self, command): """Test provider direct switch with args.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.return_value = None - result = await command.execute(args=["openai"]) - assert result.success is True + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_provider.return_value = None + with patch("chuk_term.ui.output"): + result = await command.execute(args=["openai"]) + assert result.success is True @pytest.mark.asyncio async def test_provider_set_from_args_string(self, set_command): """Test set provider from string arg.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.return_value = None - result = await set_command.execute(args="anthropic") - assert result.success is True - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == ["anthropic"] + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_provider.return_value = None + with patch("chuk_term.ui.output"): + result = await set_command.execute(args="anthropic") + assert result.success is True + mock_ctx.model_manager.switch_provider.assert_called_once_with( + "anthropic" + ) @pytest.mark.asyncio async def test_provider_set_from_args_list(self, set_command): """Test set provider from list args.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.return_value = None - result = await set_command.execute(args=["ollama"]) - assert result.success is True + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = 
mock_get_ctx.return_value + mock_ctx.model_manager.switch_provider.return_value = None + with patch("chuk_term.ui.output"): + result = await set_command.execute(args=["ollama"]) + assert result.success is True @pytest.mark.asyncio async def test_provider_set_no_name(self, set_command): @@ -73,44 +73,45 @@ async def test_provider_set_no_name(self, set_command): @pytest.mark.asyncio async def test_provider_set_error(self, set_command): """Test set provider error.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.side_effect = Exception("Failed") - result = await set_command.execute(provider_name="bad") - assert result.success is False - assert "Failed to set provider" in result.error + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_provider.side_effect = Exception("Failed") + with patch("chuk_term.ui.output"): + result = await set_command.execute(provider_name="bad") + assert result.success is False + assert "Failed to set provider" in result.error @pytest.mark.asyncio async def test_provider_show_execute(self, show_command): """Test show provider.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.return_value = None - result = await show_command.execute() - assert result.success is True + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_provider.return_value = "openai" + mock_ctx.model_manager.get_active_model.return_value = "gpt-4" + with patch("chuk_term.ui.output"): + result = await show_command.execute() + assert result.success is True @pytest.mark.asyncio async def test_provider_show_error(self, show_command): """Test show provider error.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.side_effect = 
Exception("Failed") - result = await show_command.execute() - assert result.success is False - assert "Failed to get provider info" in result.error + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_provider.side_effect = Exception("Failed") + with patch("chuk_term.ui.output"): + result = await show_command.execute() + assert result.success is False + assert "Failed to get provider info" in result.error @pytest.mark.asyncio async def test_provider_command_error(self, command): """Test provider command error handling.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.side_effect = Exception("Error") - result = await command.execute(args=["test"]) - assert result.success is False + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_provider.side_effect = Exception("Error") + with patch("chuk_term.ui.output"): + result = await command.execute(args=["test"]) + assert result.success is False class TestServerSingularCommandCoverage: @@ -123,38 +124,50 @@ def command(self): @pytest.mark.asyncio async def test_server_details_string_args(self, command): """Test server details with string args.""" - with patch( - "mcp_cli.commands.actions.servers.servers_action_async" - ) as mock_action: - mock_action.return_value = [] - result = await command.execute(args="test-server") - assert result.success is True - # Verify ServerActionParams was created with the right args - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == ["test-server"] + from mcp_cli.tools.models import ServerInfo + from unittest.mock import AsyncMock + + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_server = ServerInfo( + id=1, + name="test-server", + status="running", + 
connected=True, + tool_count=5, + namespace="test", + ) + mock_ctx.tool_manager.get_server_info = AsyncMock( + return_value=[mock_server] + ) + with patch("chuk_term.ui.output"): + result = await command.execute(args="test-server") + assert result.success is True @pytest.mark.asyncio async def test_server_details_error(self, command): """Test server details error.""" - with patch( - "mcp_cli.commands.actions.servers.servers_action_async" - ) as mock_action: - mock_action.side_effect = Exception("Not found") - result = await command.execute(args=["bad-server"]) - assert result.success is False - assert "Failed to execute server command" in result.error + from unittest.mock import AsyncMock + + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock( + side_effect=Exception("Not found") + ) + with patch("chuk_term.ui.output"): + result = await command.execute(args=["bad-server"]) + assert result.success is False + assert "Failed to get server details" in result.error @pytest.mark.asyncio async def test_server_no_args(self, command): """Test server with no args - should list servers.""" - with patch( - "mcp_cli.commands.actions.servers.servers_action_async" - ) as mock_action: - mock_action.return_value = [] - result = await command.execute() - assert result.success is True - # Verify ServerActionParams was created with empty args - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == [] + from unittest.mock import AsyncMock + + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock(return_value=[]) + with patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table"): + result = await command.execute() + assert result.success is True diff --git a/tests/commands/definitions/test_execute_tool_command.py 
b/tests/commands/definitions/test_execute_tool_command.py index 97dd1491..6016494e 100644 --- a/tests/commands/definitions/test_execute_tool_command.py +++ b/tests/commands/definitions/test_execute_tool_command.py @@ -3,7 +3,7 @@ import pytest from unittest.mock import AsyncMock, MagicMock, patch -from mcp_cli.commands.definitions.execute_tool import ExecuteToolCommand +from mcp_cli.commands.tools.execute_tool import ExecuteToolCommand from mcp_cli.commands.base import CommandMode from mcp_cli.tools.models import ToolCallResult @@ -92,7 +92,7 @@ async def test_execute_no_tool_manager(self, execute_command): @pytest.mark.asyncio async def test_execute_list_tools(self, execute_command, mock_tool_manager): """Test listing tools when no tool specified.""" - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute(tool_manager=mock_tool_manager) # Should list available tools @@ -104,7 +104,7 @@ async def test_execute_list_tools(self, execute_command, mock_tool_manager): @pytest.mark.asyncio async def test_execute_tool_not_found(self, execute_command, mock_tool_manager): """Test execute with non-existent tool.""" - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="nonexistent_tool", tool_manager=mock_tool_manager ) @@ -116,7 +116,7 @@ async def test_execute_tool_not_found(self, execute_command, mock_tool_manager): @pytest.mark.asyncio async def test_execute_show_tool_info(self, execute_command, mock_tool_manager): """Test showing tool info when no params provided.""" - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="test_tool", 
tool_manager=mock_tool_manager ) @@ -130,7 +130,7 @@ async def test_execute_show_tool_info(self, execute_command, mock_tool_manager): @pytest.mark.asyncio async def test_execute_tool_success(self, execute_command, mock_tool_manager): """Test successful tool execution.""" - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="test_tool", params='{"text": "hello", "count": 5}', @@ -147,7 +147,7 @@ async def test_execute_tool_success(self, execute_command, mock_tool_manager): @pytest.mark.asyncio async def test_execute_tool_with_server(self, execute_command, mock_tool_manager): """Test tool execution with server specification.""" - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="test_tool", server="test_server", @@ -165,7 +165,7 @@ async def test_execute_invalid_json_params( self, execute_command, mock_tool_manager ): """Test execution with invalid JSON parameters.""" - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="test_tool", params="invalid json", tool_manager=mock_tool_manager ) @@ -179,7 +179,7 @@ async def test_execute_plain_string_params( self, execute_command, mock_tool_manager ): """Test execution with plain string instead of JSON.""" - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="test_tool", params="hello world", # Plain string, not JSON @@ -194,7 +194,7 @@ async def test_execute_plain_string_params( @pytest.mark.asyncio async def 
test_execute_tool_with_args(self, execute_command, mock_tool_manager): """Test parsing tool name and params from args.""" - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( args=["test_tool", '{"text": "hello"}'], tool_manager=mock_tool_manager ) @@ -213,7 +213,7 @@ async def test_execute_tool_failure(self, execute_command, mock_tool_manager): ) ) - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="test_tool", params='{"text": "hello"}', @@ -230,7 +230,7 @@ async def test_execute_tool_exception(self, execute_command, mock_tool_manager): side_effect=Exception("Unexpected error") ) - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="test_tool", params='{"text": "hello"}', @@ -245,7 +245,7 @@ async def test_execute_tool_exception(self, execute_command, mock_tool_manager): @pytest.mark.asyncio async def test_execute_empty_params(self, execute_command, mock_tool_manager): """Test execution with empty parameters.""" - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="another_tool", params="{}", tool_manager=mock_tool_manager ) @@ -276,7 +276,7 @@ async def test_execute_get_all_tools_exception( @pytest.mark.asyncio async def test_execute_with_quoted_params(self, execute_command, mock_tool_manager): """Test handling params with surrounding quotes.""" - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") 
as mock_output: result = await execute_command.execute( tool="test_tool", params='\'{"text": "hello"}\'', # Extra quotes @@ -301,7 +301,7 @@ async def test_execute_missing_required_param( ) ) - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="test_tool", params='{"count": 5}', # Missing required "text" @@ -316,7 +316,7 @@ async def test_execute_parse_simple_params( self, execute_command, mock_tool_manager ): """Test parsing simple key=value params.""" - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: # The execute command should try to parse key=value format result = await execute_command.execute( tool="test_tool", @@ -340,7 +340,7 @@ async def test_execute_tool_dict_result(self, execute_command, mock_tool_manager ) ) - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="test_tool", params='{"text": "hello"}', @@ -355,7 +355,7 @@ async def test_execute_tool_no_result(self, execute_command, mock_tool_manager): """Test handling when tool returns no result.""" mock_tool_manager.execute_tool = AsyncMock(return_value=None) - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="test_tool", params='{"text": "hello"}', @@ -368,7 +368,7 @@ async def test_execute_tool_no_result(self, execute_command, mock_tool_manager): @pytest.mark.asyncio async def test_execute_args_as_string(self, execute_command, mock_tool_manager): """Test args provided as string instead of list.""" - with 
patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( args="test_tool", tool_manager=mock_tool_manager ) diff --git a/tests/commands/definitions/test_execute_tool_command_extended.py b/tests/commands/definitions/test_execute_tool_command_extended.py index f504cd53..53e39ec4 100644 --- a/tests/commands/definitions/test_execute_tool_command_extended.py +++ b/tests/commands/definitions/test_execute_tool_command_extended.py @@ -3,7 +3,7 @@ import pytest from unittest.mock import AsyncMock, MagicMock, patch -from mcp_cli.commands.definitions.execute_tool import ExecuteToolCommand +from mcp_cli.commands.tools.execute_tool import ExecuteToolCommand from mcp_cli.tools.models import ToolCallResult @@ -70,7 +70,7 @@ async def test_list_tools_with_server_names(self, execute_command): manager.get_all_tools = AsyncMock(return_value=[tool1, tool2]) manager.server_names = {0: "sqlite", 1: "echo"} - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute(tool_manager=manager) assert result.success is True @@ -98,7 +98,7 @@ async def test_show_tool_info_with_input_schema( manager.server_names = {} # Add server_names attribute manager.get_all_tools = AsyncMock(return_value=[mock_tool_with_input_schema]) - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="schema_tool", tool_manager=manager ) @@ -122,7 +122,7 @@ async def test_show_tool_info_no_parameters(self, execute_command): manager.server_names = {} # Add server_names attribute manager.get_all_tools = AsyncMock(return_value=[tool]) - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + 
with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="simple_tool", tool_manager=manager ) @@ -139,7 +139,7 @@ async def test_show_tool_info_complex_parameters( manager.server_names = {} # Add server_names attribute manager.get_all_tools = AsyncMock(return_value=[mock_tool_with_schema]) - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="complex_tool", tool_manager=manager ) @@ -171,7 +171,7 @@ async def test_parse_simple_params_key_value(self, execute_command): ) ) - with patch("mcp_cli.commands.definitions.execute_tool.output"): + with patch("mcp_cli.commands.tools.execute_tool.output"): result = await execute_command.execute( tool="test_tool", params="key1=value1 key2=value2 flag=true", @@ -202,7 +202,7 @@ async def test_execute_with_double_quoted_params(self, execute_command): ) ) - with patch("mcp_cli.commands.definitions.execute_tool.output"): + with patch("mcp_cli.commands.tools.execute_tool.output"): result = await execute_command.execute( tool="test_tool", params='"{"text": "hello"}"', # Double quoted JSON @@ -220,7 +220,7 @@ async def test_execute_json_decode_error_with_example( manager.server_names = {} # Add server_names attribute manager.get_all_tools = AsyncMock(return_value=[mock_tool_with_schema]) - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="complex_tool", params="{invalid json}", tool_manager=manager ) @@ -247,7 +247,7 @@ async def test_execute_tool_result_dict_type(self, execute_command): return_value={"status": "ok", "data": [1, 2, 3]} ) - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as 
mock_output: result = await execute_command.execute( tool="test_tool", params="{}", tool_manager=manager ) @@ -273,7 +273,7 @@ async def test_execute_tool_with_warning_no_result(self, execute_command): ) ) - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="test_tool", params="{}", tool_manager=manager ) @@ -299,7 +299,7 @@ async def test_execute_with_server_parameter(self, execute_command): ) ) - with patch("mcp_cli.commands.definitions.execute_tool.output"): + with patch("mcp_cli.commands.tools.execute_tool.output"): result = await execute_command.execute( tool="test_tool", server="specific_server", @@ -328,7 +328,7 @@ async def test_execute_string_result(self, execute_command): ) ) - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="test_tool", params="{}", tool_manager=manager ) @@ -361,7 +361,7 @@ async def test_execute_with_error_missing_required_param_extraction( ) ) - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="test_tool", params="{}", tool_manager=manager ) @@ -391,7 +391,7 @@ async def test_execute_with_generic_error(self, execute_command): ) ) - with patch("mcp_cli.commands.definitions.execute_tool.output") as mock_output: + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: result = await execute_command.execute( tool="test_tool", params="{}", tool_manager=manager ) @@ -417,7 +417,7 @@ async def test_args_as_list_with_multiple_params(self, execute_command): ) ) - with patch("mcp_cli.commands.definitions.execute_tool.output"): + with patch("mcp_cli.commands.tools.execute_tool.output"): 
result = await execute_command.execute( args=["test_tool", '{"text": "hello"}', "extra_arg"], tool_manager=manager, @@ -442,7 +442,7 @@ async def test_plain_string_error_with_equals_sign(self, execute_command): manager.get_all_tools = AsyncMock(return_value=[tool]) - with patch("mcp_cli.commands.definitions.execute_tool.output"): + with patch("mcp_cli.commands.tools.execute_tool.output"): result = await execute_command.execute( tool="test_tool", params="key=value", # Has equals but will be parsed differently diff --git a/tests/commands/definitions/test_execute_tool_coverage.py b/tests/commands/definitions/test_execute_tool_coverage.py new file mode 100644 index 00000000..9eebfe9d --- /dev/null +++ b/tests/commands/definitions/test_execute_tool_coverage.py @@ -0,0 +1,623 @@ +"""Additional tests for execute_tool command to achieve >90% coverage.""" + +import pytest +from unittest.mock import AsyncMock, MagicMock, patch + +from mcp_cli.commands.tools.execute_tool import ExecuteToolCommand, _to_serializable +from mcp_cli.tools.models import ToolCallResult + + +@pytest.fixture +def execute_command(): + """Create an execute tool command instance.""" + return ExecuteToolCommand() + + +class TestToSerializable: + """Tests for _to_serializable function.""" + + def test_none_value(self): + """Test serializing None.""" + assert _to_serializable(None) is None + + def test_primitives(self): + """Test serializing primitives.""" + assert _to_serializable("hello") == "hello" + assert _to_serializable(42) == 42 + assert _to_serializable(3.14) == 3.14 + assert _to_serializable(True) is True + + def test_list(self): + """Test serializing lists.""" + assert _to_serializable([1, 2, 3]) == [1, 2, 3] + assert _to_serializable(["a", None, True]) == ["a", None, True] + + def test_dict(self): + """Test serializing dicts.""" + assert _to_serializable({"key": "value"}) == {"key": "value"} + assert _to_serializable({"nested": {"a": 1}}) == {"nested": {"a": 1}} + + def 
test_pydantic_model_with_model_dump(self): + """Test serializing object with model_dump method.""" + obj = MagicMock() + obj.model_dump.return_value = {"field": "value"} + # Remove dict method to force model_dump path + del obj.dict + + result = _to_serializable(obj) + assert result == {"field": "value"} + + def test_pydantic_model_with_dict(self): + """Test serializing object with dict method (older Pydantic).""" + obj = MagicMock(spec=["dict"]) + obj.dict.return_value = {"field": "value"} + + result = _to_serializable(obj) + assert result == {"field": "value"} + + def test_mcp_tool_result_with_text_content(self): + """Test serializing MCP SDK ToolResult with text content.""" + text_item = MagicMock() + text_item.text = "Hello, world!" + # Remove model_dump to force text path + del text_item.model_dump + + obj = MagicMock(spec=["content"]) + obj.content = [text_item] + + result = _to_serializable(obj) + assert result == "Hello, world!" + + def test_mcp_tool_result_with_multiple_text_content(self): + """Test serializing MCP SDK ToolResult with multiple text items.""" + text_item1 = MagicMock() + text_item1.text = "Line 1" + del text_item1.model_dump + + text_item2 = MagicMock() + text_item2.text = "Line 2" + del text_item2.model_dump + + obj = MagicMock(spec=["content"]) + obj.content = [text_item1, text_item2] + + result = _to_serializable(obj) + assert result == ["Line 1", "Line 2"] + + def test_mcp_tool_result_with_model_dump_content(self): + """Test serializing MCP SDK ToolResult with model_dump items.""" + item1 = MagicMock() + item1.model_dump.return_value = {"type": "image", "data": "base64..."} + # Ensure text attribute doesn't exist + del item1.text + + item2 = MagicMock() + item2.model_dump.return_value = {"type": "text", "data": "hello"} + del item2.text + + obj = MagicMock(spec=["content"]) + obj.content = [item1, item2] # Multiple items returns list + + result = _to_serializable(obj) + assert result == [ + {"type": "image", "data": "base64..."}, + 
{"type": "text", "data": "hello"}, + ] + + def test_mcp_tool_result_with_plain_content(self): + """Test serializing MCP SDK ToolResult with content that has no special handling.""" + item1 = MagicMock(spec=[]) # No text, no model_dump + item2 = MagicMock(spec=[]) # No text, no model_dump + + obj = MagicMock(spec=["content"]) + obj.content = [item1, item2] # Multiple items returns list + + result = _to_serializable(obj) + # Should fall back to str() for each item, returns list for multiple + assert isinstance(result, list) + assert len(result) == 2 + + def test_mcp_tool_result_with_non_list_content(self): + """Test serializing MCP SDK ToolResult with non-list content.""" + obj = MagicMock(spec=["content"]) + obj.content = {"direct": "content"} + + result = _to_serializable(obj) + assert result == {"direct": "content"} + + def test_fallback_to_string(self): + """Test fallback to string for unknown types.""" + + class CustomClass: + def __str__(self): + return "custom_string" + + result = _to_serializable(CustomClass()) + assert result == "custom_string" + + +class TestExecuteToolArgsHandling: + """Tests for args parameter handling.""" + + @pytest.mark.asyncio + async def test_args_string_as_params_when_tool_set(self, execute_command): + """Test args as string becomes params when tool is already set.""" + manager = AsyncMock() + manager.server_names = {} + tool = MagicMock() + tool.name = "test_tool" + tool.namespace = "default" + tool.inputSchema = None + tool.parameters = {} + + manager.get_all_tools = AsyncMock(return_value=[tool]) + manager.execute_tool = AsyncMock( + return_value=ToolCallResult( + tool_name="test_tool", success=True, result="Success" + ) + ) + + with patch("mcp_cli.commands.tools.execute_tool.output"): + result = await execute_command.execute( + tool="test_tool", + args="{}", # String args should become params + tool_manager=manager, + ) + + assert result.success is True + manager.execute_tool.assert_called_once() + + +class 
TestExecuteToolErrorHandling: + """Tests for error handling paths.""" + + @pytest.mark.asyncio + async def test_tool_not_found_on_specific_server(self, execute_command): + """Test tool not found when filtering by server.""" + manager = AsyncMock() + manager.server_names = {} + tool = MagicMock() + tool.name = "test_tool" + tool.namespace = "server1" + tool.inputSchema = None + tool.parameters = {} + + manager.get_all_tools = AsyncMock(return_value=[tool]) + + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: + await execute_command.execute( + tool="test_tool", + server="server2", # Different server + params="{}", + tool_manager=manager, + ) + + # Should show error and list tools + mock_output.error.assert_called_with( + "Tool 'test_tool' not found on server 'server2'" + ) + + @pytest.mark.asyncio + async def test_json_error_with_no_required_params(self, execute_command): + """Test JSON error display when tool has no required params.""" + manager = AsyncMock() + manager.server_names = {} + tool = MagicMock() + tool.name = "test_tool" + tool.namespace = "default" + tool.inputSchema = None + tool.parameters = { + "properties": {"optional": {"type": "string"}}, + "required": [], # No required params + } + + manager.get_all_tools = AsyncMock(return_value=[tool]) + + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: + result = await execute_command.execute( + tool="test_tool", + params="{invalid json}", + tool_manager=manager, + ) + + assert result.success is False + # Should show empty example since no required params + output_calls = str(mock_output.print.call_args_list) + assert "'{}'" in output_calls or "execute" in output_calls + + @pytest.mark.asyncio + async def test_json_error_with_message_param(self, execute_command): + """Test JSON error showing 'message' param with special handling.""" + manager = AsyncMock() + manager.server_names = {} + tool = MagicMock() + tool.name = "echo_tool" + tool.namespace = "default" + 
tool.inputSchema = None + tool.parameters = { + "properties": {"message": {"type": "string"}}, + "required": ["message"], + } + + manager.get_all_tools = AsyncMock(return_value=[tool]) + + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: + result = await execute_command.execute( + tool="echo_tool", + params="{bad json}", + tool_manager=manager, + ) + + assert result.success is False + # Should show example with "your message here" + output_calls = str(mock_output.print.call_args_list) + assert "message" in output_calls + + @pytest.mark.asyncio + async def test_json_error_with_number_boolean_params(self, execute_command): + """Test JSON error example with number and boolean types.""" + manager = AsyncMock() + manager.server_names = {} + tool = MagicMock() + tool.name = "typed_tool" + tool.namespace = "default" + tool.inputSchema = None + tool.parameters = { + "properties": { + "count": {"type": "number"}, + "enabled": {"type": "boolean"}, + }, + "required": ["count", "enabled"], + } + + manager.get_all_tools = AsyncMock(return_value=[tool]) + + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: + result = await execute_command.execute( + tool="typed_tool", + params="{invalid}", + tool_manager=manager, + ) + + assert result.success is False + # Should show example with 123 and true + output_calls = str(mock_output.print.call_args_list) + assert "123" in output_calls or "true" in output_calls.lower() + + @pytest.mark.asyncio + async def test_exception_unexpected_keyword_argument(self, execute_command): + """Test exception handling for unexpected keyword argument error.""" + manager = AsyncMock() + manager.server_names = {} + tool = MagicMock() + tool.name = "test_tool" + tool.namespace = "default" + tool.inputSchema = None + tool.parameters = {} + + manager.get_all_tools = AsyncMock(return_value=[tool]) + manager.execute_tool = AsyncMock( + side_effect=TypeError("got an unexpected keyword argument 'foo'") + ) + + with 
patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: + result = await execute_command.execute( + tool="test_tool", + params="{}", + tool_manager=manager, + ) + + assert result.success is False + mock_output.print.assert_any_call( + "Internal error - please report this issue" + ) + + @pytest.mark.asyncio + async def test_exception_invalid_parameter(self, execute_command): + """Test exception handling for Invalid parameter error.""" + manager = AsyncMock() + manager.server_names = {} + tool = MagicMock() + tool.name = "test_tool" + tool.namespace = "default" + tool.inputSchema = None + tool.parameters = {} + + manager.get_all_tools = AsyncMock(return_value=[tool]) + manager.execute_tool = AsyncMock( + side_effect=ValueError("Invalid parameter: foo must be string") + ) + + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: + result = await execute_command.execute( + tool="test_tool", + params="{}", + tool_manager=manager, + ) + + assert result.success is False + # Should show hint about checking parameters + mock_output.hint.assert_called_with( + "Use /execute to see correct parameters" + ) + + +class TestListToolsEmpty: + """Tests for empty tools list.""" + + @pytest.mark.asyncio + async def test_list_tools_empty(self, execute_command): + """Test listing tools when no tools available.""" + manager = AsyncMock() + manager.server_names = {} + manager.get_all_tools = AsyncMock(return_value=[]) + + result = await execute_command.execute(tool_manager=manager) + + assert result.success is True + assert "No tools available" in result.output + + +class TestShowToolInfoEdgeCases: + """Tests for _show_tool_info edge cases.""" + + @pytest.mark.asyncio + async def test_show_tool_info_with_input_schema(self, execute_command): + """Test showing tool info when tool has inputSchema (not parameters).""" + manager = AsyncMock() + manager.server_names = {} + tool = MagicMock() + tool.name = "schema_tool" + tool.description = "Tool with inputSchema" 
+ tool.namespace = "default" + tool.parameters = None # No parameters + tool.inputSchema = { + "properties": {"query": {"type": "string", "description": "Query"}}, + "required": ["query"], + } + + manager.get_all_tools = AsyncMock(return_value=[tool]) + + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: + result = await execute_command.execute( + tool="schema_tool", tool_manager=manager + ) + + assert result.success is True + mock_output.rule.assert_any_call("Parameters") + + @pytest.mark.asyncio + async def test_show_tool_info_no_properties(self, execute_command): + """Test showing tool info when schema has no properties.""" + manager = AsyncMock() + manager.server_names = {} + tool = MagicMock() + tool.name = "no_props_tool" + tool.description = "Tool without properties" + tool.namespace = "default" + tool.inputSchema = None + tool.parameters = {"type": "object"} # Schema without properties + + manager.get_all_tools = AsyncMock(return_value=[tool]) + + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: + result = await execute_command.execute( + tool="no_props_tool", tool_manager=manager + ) + + assert result.success is True + mock_output.print.assert_any_call(" No parameters required") + + @pytest.mark.asyncio + async def test_show_tool_info_array_object_types(self, execute_command): + """Test showing tool info with array and object parameter types.""" + manager = AsyncMock() + manager.server_names = {} + tool = MagicMock() + tool.name = "complex_tool" + tool.description = "Tool with complex types" + tool.namespace = "default" + tool.inputSchema = None + tool.parameters = { + "properties": { + "items": {"type": "array", "description": "List of items"}, + "config": {"type": "object", "description": "Configuration"}, + }, + "required": ["items", "config"], + } + + manager.get_all_tools = AsyncMock(return_value=[tool]) + + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: + result = await 
execute_command.execute( + tool="complex_tool", tool_manager=manager + ) + + assert result.success is True + # Example should contain [] and {} + output_calls = str(mock_output.print.call_args_list) + assert "[]" in output_calls or "{}" in output_calls + + @pytest.mark.asyncio + async def test_show_tool_info_no_required_params(self, execute_command): + """Test showing tool info when no params are required.""" + manager = AsyncMock() + manager.server_names = {} + tool = MagicMock() + tool.name = "optional_tool" + tool.description = "Tool with only optional params" + tool.namespace = "default" + tool.inputSchema = None + tool.parameters = { + "properties": {"opt": {"type": "string", "description": "Optional"}}, + "required": [], + } + + manager.get_all_tools = AsyncMock(return_value=[tool]) + + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: + result = await execute_command.execute( + tool="optional_tool", tool_manager=manager + ) + + assert result.success is True + # Example should just show the tool name without params + output_calls = str(mock_output.print.call_args_list) + assert "optional_tool" in output_calls + + +class TestParseSimpleParams: + """Tests for _parse_simple_params method.""" + + @pytest.mark.asyncio + async def test_parse_simple_params_single_value(self, execute_command): + """Test parsing single value without key.""" + manager = AsyncMock() + manager.server_names = {} + tool = MagicMock() + tool.name = "test_tool" + tool.namespace = "default" + tool.inputSchema = None + tool.parameters = {} + + manager.get_all_tools = AsyncMock(return_value=[tool]) + manager.execute_tool = AsyncMock( + return_value=ToolCallResult( + tool_name="test_tool", success=True, result="Success" + ) + ) + + with patch("mcp_cli.commands.tools.execute_tool.output"): + result = await execute_command.execute( + tool="test_tool", + params="singlevalue", # Single value without = + tool_manager=manager, + ) + + # Should parse as {"value": "singlevalue"} + 
call_args = manager.execute_tool.call_args + if call_args: + args = call_args[1].get("arguments", {}) + assert "value" in args or result.success is False + + @pytest.mark.asyncio + async def test_parse_simple_params_json_value(self, execute_command): + """Test parsing key=value where value is JSON.""" + manager = AsyncMock() + manager.server_names = {} + tool = MagicMock() + tool.name = "test_tool" + tool.namespace = "default" + tool.inputSchema = None + tool.parameters = {} + + manager.get_all_tools = AsyncMock(return_value=[tool]) + manager.execute_tool = AsyncMock( + return_value=ToolCallResult( + tool_name="test_tool", success=True, result="Success" + ) + ) + + with patch("mcp_cli.commands.tools.execute_tool.output"): + result = await execute_command.execute( + tool="test_tool", + params='count=42 flag=true items=["a","b"]', + tool_manager=manager, + ) + + # Should parse JSON values properly + call_args = manager.execute_tool.call_args + if call_args: + args = call_args[1].get("arguments", {}) + # count should be int 42, flag should be bool True + assert args.get("count") == 42 or result is not None + + +class TestPlainStringErrorGuessing: + """Tests for guessing param name when user provides plain string.""" + + @pytest.mark.asyncio + async def test_plain_string_error_no_properties(self, execute_command): + """Test plain string error when tool has no properties.""" + manager = AsyncMock() + manager.server_names = {} + tool = MagicMock() + tool.name = "simple_tool" + tool.namespace = "default" + tool.inputSchema = None + tool.parameters = {} # No properties + + manager.get_all_tools = AsyncMock(return_value=[tool]) + + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: + result = await execute_command.execute( + tool="simple_tool", + params="just a string", + tool_manager=manager, + ) + + assert result.success is False + # Should default to "message" as param name + output_calls = str(mock_output.print.call_args_list) + assert "message" in 
output_calls + + @pytest.mark.asyncio + async def test_plain_string_error_with_required_param(self, execute_command): + """Test plain string error guessing first required param.""" + manager = AsyncMock() + manager.server_names = {} + tool = MagicMock() + tool.name = "query_tool" + tool.namespace = "default" + tool.inputSchema = None + tool.parameters = { + "properties": { + "query": {"type": "string"}, + "limit": {"type": "number"}, + }, + "required": ["query"], + } + + manager.get_all_tools = AsyncMock(return_value=[tool]) + + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: + result = await execute_command.execute( + tool="query_tool", + params="my search query", + tool_manager=manager, + ) + + assert result.success is False + # Should use "query" as the guessed param name + output_calls = str(mock_output.print.call_args_list) + assert "query" in output_calls + + @pytest.mark.asyncio + async def test_plain_string_error_first_property(self, execute_command): + """Test plain string error using first property when no required.""" + manager = AsyncMock() + manager.server_names = {} + tool = MagicMock() + tool.name = "flex_tool" + tool.namespace = "default" + tool.inputSchema = None + tool.parameters = { + "properties": { + "input": {"type": "string"}, + }, + "required": [], # None required + } + + manager.get_all_tools = AsyncMock(return_value=[tool]) + + with patch("mcp_cli.commands.tools.execute_tool.output") as mock_output: + result = await execute_command.execute( + tool="flex_tool", + params="some input", + tool_manager=manager, + ) + + assert result.success is False + # Should use "input" as the guessed param name + output_calls = str(mock_output.print.call_args_list) + assert "input" in output_calls diff --git a/tests/commands/definitions/test_exit_command.py b/tests/commands/definitions/test_exit_command.py index 836fc24a..1054ad89 100644 --- a/tests/commands/definitions/test_exit_command.py +++ 
b/tests/commands/definitions/test_exit_command.py @@ -1,7 +1,7 @@ """Tests for the exit command.""" import pytest -from mcp_cli.commands.definitions.exit import ExitCommand +from mcp_cli.commands.core.exit import ExitCommand class TestExitCommand: diff --git a/tests/commands/definitions/test_exit_command_extended.py b/tests/commands/definitions/test_exit_command_extended.py index b16cd44d..c991cb92 100644 --- a/tests/commands/definitions/test_exit_command_extended.py +++ b/tests/commands/definitions/test_exit_command_extended.py @@ -1,7 +1,7 @@ """Extended tests for exit command to achieve 100% coverage.""" import pytest -from mcp_cli.commands.definitions.exit import ExitCommand +from mcp_cli.commands.core.exit import ExitCommand from mcp_cli.commands.base import CommandMode diff --git a/tests/commands/definitions/test_help_command.py b/tests/commands/definitions/test_help_command.py index 8947726e..555801a9 100644 --- a/tests/commands/definitions/test_help_command.py +++ b/tests/commands/definitions/test_help_command.py @@ -2,7 +2,7 @@ import pytest from unittest.mock import Mock, patch -from mcp_cli.commands.definitions.help import HelpCommand +from mcp_cli.commands.core.help import HelpCommand from mcp_cli.commands.base import CommandMode @@ -25,7 +25,7 @@ def test_command_properties(self, command): async def test_execute_general_help(self, command): """Test showing general help.""" with patch( - "mcp_cli.commands.definitions.help.UnifiedCommandRegistry" + "mcp_cli.commands.core.help.UnifiedCommandRegistry" ) as mock_registry_class: # Create a mock registry instance mock_registry = Mock() @@ -47,8 +47,8 @@ async def test_execute_general_help(self, command): mock_registry.list_commands.return_value = [mock_cmd1, mock_cmd2] # Patch the output functions to avoid actual printing - with patch("mcp_cli.commands.definitions.help.output"): - with patch("mcp_cli.commands.definitions.help.format_table"): + with patch("mcp_cli.commands.core.help.output"): + with 
patch("mcp_cli.commands.core.help.format_table"): result = await command.execute() assert result.success is True @@ -57,7 +57,7 @@ async def test_execute_general_help(self, command): async def test_execute_specific_command_help(self, command): """Test showing help for a specific command.""" with patch( - "mcp_cli.commands.definitions.help.UnifiedCommandRegistry" + "mcp_cli.commands.core.help.UnifiedCommandRegistry" ) as mock_registry_class: # Create a mock registry instance mock_registry = Mock() @@ -74,7 +74,7 @@ async def test_execute_specific_command_help(self, command): mock_registry.get.return_value = mock_cmd # Patch the output to avoid actual printing - with patch("mcp_cli.commands.definitions.help.output"): + with patch("mcp_cli.commands.core.help.output"): result = await command.execute(command="test") assert result.success is True @@ -84,7 +84,7 @@ async def test_execute_specific_command_help(self, command): async def test_execute_unknown_command(self, command): """Test showing help for an unknown command.""" with patch( - "mcp_cli.commands.definitions.help.UnifiedCommandRegistry" + "mcp_cli.commands.core.help.UnifiedCommandRegistry" ) as mock_registry_class: # Create a mock registry instance mock_registry = Mock() @@ -101,7 +101,7 @@ async def test_execute_unknown_command(self, command): async def test_execute_with_mode_filter(self, command): """Test showing help filtered by mode.""" with patch( - "mcp_cli.commands.definitions.help.UnifiedCommandRegistry" + "mcp_cli.commands.core.help.UnifiedCommandRegistry" ) as mock_registry_class: # Create a mock registry instance mock_registry = Mock() @@ -125,8 +125,8 @@ async def test_execute_with_mode_filter(self, command): mock_registry.list_commands.return_value = [mock_cmd1, mock_cmd2] # Patch the output functions to avoid actual printing - with patch("mcp_cli.commands.definitions.help.output"): - with patch("mcp_cli.commands.definitions.help.format_table"): + with patch("mcp_cli.commands.core.help.output"): + 
with patch("mcp_cli.commands.core.help.format_table"): # Execute with chat mode filter result = await command.execute(mode=CommandMode.CHAT) diff --git a/tests/commands/definitions/test_help_command_extended.py b/tests/commands/definitions/test_help_command_extended.py index 63d1dd51..fd73842b 100644 --- a/tests/commands/definitions/test_help_command_extended.py +++ b/tests/commands/definitions/test_help_command_extended.py @@ -3,7 +3,7 @@ import pytest from unittest.mock import MagicMock, patch -from mcp_cli.commands.definitions.help import HelpCommand +from mcp_cli.commands.core.help import HelpCommand from mcp_cli.commands.base import CommandMode @@ -16,9 +16,7 @@ def help_command(): @pytest.fixture def mock_registry(): """Create a mock registry with various commands.""" - with patch( - "mcp_cli.commands.definitions.help.UnifiedCommandRegistry" - ) as MockRegistry: + with patch("mcp_cli.commands.core.help.UnifiedCommandRegistry") as MockRegistry: mock_reg = MagicMock() # Create mock commands with different modes cmd1 = MagicMock() @@ -179,9 +177,7 @@ async def test_help_format_output(help_command, mock_registry): @pytest.mark.asyncio async def test_help_empty_registry(help_command): """Test help with no commands registered.""" - with patch( - "mcp_cli.commands.definitions.help.UnifiedCommandRegistry" - ) as MockRegistry: + with patch("mcp_cli.commands.core.help.UnifiedCommandRegistry") as MockRegistry: mock_reg = MagicMock() mock_reg.list_commands.return_value = [] MockRegistry.return_value = mock_reg @@ -255,9 +251,7 @@ async def test_help_with_commands_with_aliases(): """Test help showing commands with aliases column.""" help_cmd = HelpCommand() - with patch( - "mcp_cli.commands.definitions.help.UnifiedCommandRegistry" - ) as MockRegistry: + with patch("mcp_cli.commands.core.help.UnifiedCommandRegistry") as MockRegistry: mock_reg = MagicMock() cmd1 = MagicMock() diff --git a/tests/commands/definitions/test_help_coverage.py 
b/tests/commands/definitions/test_help_coverage.py index 32e70e04..2d6c66ec 100644 --- a/tests/commands/definitions/test_help_coverage.py +++ b/tests/commands/definitions/test_help_coverage.py @@ -3,7 +3,7 @@ import pytest from unittest.mock import patch, MagicMock -from mcp_cli.commands.definitions.help import HelpCommand +from mcp_cli.commands.core.help import HelpCommand from mcp_cli.commands.base import ( CommandMode, CommandResult, @@ -120,7 +120,7 @@ def test_requires_context_property(self): assert self.help_cmd.requires_context is False @pytest.mark.asyncio - @patch("mcp_cli.commands.definitions.help.output") + @patch("mcp_cli.commands.core.help.output") async def test_list_commands_with_subcommands_indicator(self, mock_output): """Test that commands with subcommands show the ▸ indicator.""" # Create a command group with subcommands @@ -141,7 +141,7 @@ async def test_list_commands_with_subcommands_indicator(self, mock_output): assert mock_output.print_table.called @pytest.mark.asyncio - @patch("mcp_cli.commands.definitions.help.output") + @patch("mcp_cli.commands.core.help.output") async def test_list_commands_with_many_subcommands(self, mock_output): """Test that commands with many subcommands are truncated in table.""" # Create a command group with many subcommands @@ -161,7 +161,7 @@ async def test_list_commands_with_many_subcommands(self, mock_output): assert mock_output.print_table.called @pytest.mark.asyncio - @patch("mcp_cli.commands.definitions.help.output") + @patch("mcp_cli.commands.core.help.output") async def test_list_commands_shows_subcommands_column(self, mock_output): """Test that the subcommands column appears when there are command groups.""" # Create regular command @@ -182,7 +182,7 @@ async def test_list_commands_shows_subcommands_column(self, mock_output): assert mock_output.print_table.called @pytest.mark.asyncio - @patch("mcp_cli.commands.definitions.help.output") + @patch("mcp_cli.commands.core.help.output") async def 
test_list_commands_shows_subcommand_hints(self, mock_output): """Test that hints about subcommands are shown.""" # Create a command group with subcommands @@ -211,7 +211,7 @@ async def test_execute_with_exception(self): # Mock the registry to raise an exception with patch( - "mcp_cli.commands.definitions.help.UnifiedCommandRegistry" + "mcp_cli.commands.core.help.UnifiedCommandRegistry" ) as mock_registry_class: mock_registry = MagicMock() mock_registry.list_commands.side_effect = RuntimeError("Test error") @@ -226,7 +226,7 @@ async def test_execute_with_exception(self): assert "Test error" in result.error @pytest.mark.asyncio - @patch("mcp_cli.commands.definitions.help.output") + @patch("mcp_cli.commands.core.help.output") async def test_list_commands_with_aliases_and_subcommands(self, mock_output): """Test listing commands that have both aliases and subcommands.""" # Create a command group with aliases and subcommands @@ -247,7 +247,7 @@ async def test_list_commands_with_aliases_and_subcommands(self, mock_output): assert mock_output.print_table.called @pytest.mark.asyncio - @patch("mcp_cli.commands.definitions.help.output") + @patch("mcp_cli.commands.core.help.output") async def test_list_commands_with_few_subcommands(self, mock_output): """Test that commands with 3 or fewer subcommands show all names.""" # Create a command group with exactly 3 subcommands diff --git a/tests/commands/definitions/test_interrupt_command.py b/tests/commands/definitions/test_interrupt_command.py index 2da81d26..b62c54f3 100644 --- a/tests/commands/definitions/test_interrupt_command.py +++ b/tests/commands/definitions/test_interrupt_command.py @@ -3,7 +3,7 @@ import pytest from unittest.mock import MagicMock -from mcp_cli.commands.definitions.interrupt import InterruptCommand +from mcp_cli.commands.core.interrupt import InterruptCommand from mcp_cli.commands.base import CommandMode diff --git a/tests/commands/definitions/test_models_command.py 
b/tests/commands/definitions/test_models_command.py index 6c20502a..dbf84b8a 100644 --- a/tests/commands/definitions/test_models_command.py +++ b/tests/commands/definitions/test_models_command.py @@ -2,7 +2,7 @@ import pytest from unittest.mock import patch -from mcp_cli.commands.definitions.models import ModelCommand +from mcp_cli.commands.providers.models import ModelCommand from mcp_cli.commands.base import CommandGroup @@ -29,15 +29,16 @@ def test_command_properties(self, command): @pytest.mark.asyncio async def test_execute_no_subcommand(self, command): """Test executing models without a subcommand.""" - # When no subcommand is provided, it should use the default (list) - with patch("mcp_cli.commands.actions.models.model_action_async") as mock_action: - mock_action.return_value = { - "models": [{"name": "gpt-4", "provider": "openai"}] - } + # When no subcommand is provided, it should show current model status + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_model.return_value = "gpt-4" + mock_ctx.model_manager.get_active_provider.return_value = "openai" - result = await command.execute() + with patch("chuk_term.ui.output"): + result = await command.execute() - # Should default to list subcommand + # Should show current model status assert result.success is True @pytest.mark.asyncio @@ -47,24 +48,34 @@ async def test_execute_list_subcommand(self, command): list_cmd = command.subcommands.get("list") assert list_cmd is not None - with patch("mcp_cli.commands.actions.models.model_action_async") as mock_action: - mock_action.return_value = { - "models": [{"name": "gpt-4", "provider": "openai"}] - } + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_provider.return_value = "openai" + mock_ctx.model_manager.get_active_model.return_value = "gpt-4" - result = await list_cmd.execute() + # Mock model 
discovery through chuk_llm + with patch( + "mcp_cli.commands.providers.models.ModelListCommand._get_provider_models" + ) as mock_discover: + mock_discover.return_value = ["gpt-4", "gpt-3.5-turbo"] + + with patch("chuk_term.ui.output"): + result = await list_cmd.execute() assert result.success is True - mock_action.assert_called_once() + mock_ctx.model_manager.get_active_provider.assert_called_once() @pytest.mark.asyncio async def test_execute_invalid_subcommand(self, command): """Test executing with an invalid subcommand.""" # The ModelCommand treats unknown subcommands as model names # So we need to test with args that would be an invalid model - with patch("mcp_cli.commands.actions.models.model_action_async") as mock_action: - # Simulate the action failing for an invalid model - mock_action.side_effect = Exception("Model not found: invalid") + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + # Simulate the model switch failing for an invalid model + mock_ctx.model_manager.switch_model.side_effect = Exception( + "Model not found: invalid" + ) result = await command.execute(args=["invalid"]) diff --git a/tests/commands/definitions/test_models_command_extended.py b/tests/commands/definitions/test_models_command_extended.py index c9fb6bc9..98efd8bb 100644 --- a/tests/commands/definitions/test_models_command_extended.py +++ b/tests/commands/definitions/test_models_command_extended.py @@ -1,9 +1,9 @@ """Extended tests for the models command definition to improve coverage.""" import pytest -from unittest.mock import patch +from unittest.mock import patch, MagicMock -from mcp_cli.commands.definitions.models import ( +from mcp_cli.commands.providers.models import ( ModelCommand, ModelListCommand, ModelSetCommand, @@ -35,10 +35,13 @@ def test_model_command_properties(self, command): @pytest.mark.asyncio async def test_model_command_default_execution(self, command): """Test executing model command without subcommand.""" - 
with patch("mcp_cli.commands.actions.models.model_action_async") as mock_action: - mock_action.return_value = None + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_model.return_value = "gpt-4" + mock_ctx.model_manager.get_active_provider.return_value = "openai" - result = await command.execute() + with patch("chuk_term.ui.output"): + result = await command.execute() assert result.success is True @@ -60,35 +63,47 @@ def test_list_command_properties(self, command): @pytest.mark.asyncio async def test_list_command_execute(self, command): """Test executing list command.""" - with patch("mcp_cli.commands.actions.models.model_action_async") as mock_action: - mock_action.return_value = None + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_provider.return_value = "openai" + mock_ctx.model_manager.get_active_model.return_value = "gpt-4" - result = await command.execute() + # Mock model discovery through chuk_llm + with patch( + "mcp_cli.commands.providers.models.ModelListCommand._get_provider_models" + ) as mock_discover: + mock_discover.return_value = ["gpt-4", "gpt-3.5-turbo"] + + with patch("chuk_term.ui.output"): + result = await command.execute() assert result.success is True - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == ["list"] + mock_ctx.model_manager.get_active_provider.assert_called_once() @pytest.mark.asyncio async def test_list_command_with_refresh(self, command): - """Test list command with refresh parameter.""" - with patch("mcp_cli.commands.actions.models.model_action_async") as mock_action: - mock_action.return_value = None + """Test list command with refresh parameter (refresh not used currently).""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + 
mock_ctx.model_manager.get_active_provider.return_value = "openai" + mock_ctx.model_manager.get_active_model.return_value = "gpt-4" - result = await command.execute(refresh=True) + # Mock model discovery through chuk_llm + with patch( + "mcp_cli.commands.providers.models.ModelListCommand._get_provider_models" + ) as mock_discover: + mock_discover.return_value = ["gpt-4", "gpt-3.5-turbo"] + + with patch("chuk_term.ui.output"): + result = await command.execute(refresh=True) assert result.success is True - # The implementation doesn't pass refresh parameter - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == ["list"] @pytest.mark.asyncio async def test_list_command_error(self, command): """Test list command error handling.""" - with patch("mcp_cli.commands.actions.models.model_action_async") as mock_action: - mock_action.side_effect = Exception("Failed to list models") + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_get_ctx.side_effect = Exception("Failed to list models") result = await command.execute() @@ -114,16 +129,15 @@ def test_set_command_properties(self, command): @pytest.mark.asyncio async def test_set_command_execute(self, command): """Test executing set command.""" - with patch("mcp_cli.commands.actions.models.model_action_async") as mock_action: - mock_action.return_value = None + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_model.return_value = None - result = await command.execute(model_name="gpt-4") + with patch("chuk_term.ui.output"): + result = await command.execute(model_name="gpt-4") assert result.success is True - # The implementation passes model name directly - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == ["gpt-4"] + mock_ctx.model_manager.switch_model.assert_called_once() @pytest.mark.asyncio async def 
test_set_command_no_model_name(self, command): @@ -136,22 +150,21 @@ async def test_set_command_no_model_name(self, command): @pytest.mark.asyncio async def test_set_command_from_args(self, command): """Test set command with model name from args.""" - with patch("mcp_cli.commands.actions.models.model_action_async") as mock_action: - mock_action.return_value = None + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_model.return_value = None - result = await command.execute(args=["gpt-3.5-turbo"]) + with patch("chuk_term.ui.output"): + result = await command.execute(args=["gpt-3.5-turbo"]) assert result.success is True - # The implementation passes model name directly - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == ["gpt-3.5-turbo"] + mock_ctx.model_manager.switch_model.assert_called_once() @pytest.mark.asyncio async def test_set_command_error(self, command): """Test set command error handling.""" - with patch("mcp_cli.commands.actions.models.model_action_async") as mock_action: - mock_action.side_effect = Exception("Model not found") + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_get_ctx.side_effect = Exception("Model not found") result = await command.execute(model_name="invalid-model") @@ -178,22 +191,22 @@ def test_show_command_properties(self, command): @pytest.mark.asyncio async def test_show_command_execute(self, command): """Test executing show command.""" - with patch("mcp_cli.commands.actions.models.model_action_async") as mock_action: - mock_action.return_value = None + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_model.return_value = "gpt-4" + mock_ctx.model_manager.get_active_provider.return_value = "openai" - result = await command.execute() + with patch("chuk_term.ui.output"): + result = await 
command.execute() assert result.success is True - # The implementation passes empty list - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == [] + mock_ctx.model_manager.get_active_model.assert_called_once() @pytest.mark.asyncio async def test_show_command_error(self, command): """Test show command error handling.""" - with patch("mcp_cli.commands.actions.models.model_action_async") as mock_action: - mock_action.side_effect = Exception("Failed to show model") + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_get_ctx.side_effect = Exception("Failed to show model") result = await command.execute() @@ -212,18 +225,32 @@ def command(self): @pytest.mark.asyncio async def test_execute_with_model_name_directly(self, command): """Test executing model command with model name directly.""" - with patch("mcp_cli.commands.actions.models.model_action_async") as mock_action: - mock_action.return_value = None + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_model.return_value = None - # When model name is provided directly - result = await command.execute(args=["gpt-4o"]) + with patch("chuk_term.ui.output"): + # When model name is provided directly + result = await command.execute(args=["gpt-4o"]) assert result.success is True @pytest.mark.asyncio async def test_execute_list_subcommand(self, command): """Test executing model list subcommand.""" - result = await command.execute(subcommand="list") + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_provider.return_value = "openai" + mock_ctx.model_manager.get_active_model.return_value = "gpt-4" + + # Mock model discovery through chuk_llm + with patch( + "mcp_cli.commands.providers.models.ModelListCommand._get_provider_models" + ) as mock_discover: + mock_discover.return_value = ["gpt-4"] + + with 
patch("chuk_term.ui.output"): + result = await command.execute(subcommand="list") # Should delegate to list subcommand assert isinstance(result, CommandResult) @@ -232,10 +259,385 @@ async def test_execute_list_subcommand(self, command): async def test_execute_invalid_subcommand(self, command): """Test executing with invalid subcommand that gets treated as model name.""" # The 'invalid' will be treated as model name to switch to - with patch("mcp_cli.commands.actions.models.model_action_async") as mock_action: - mock_action.side_effect = Exception("Model not found: invalid") + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_model.side_effect = Exception( + "Model not found: invalid" + ) result = await command.execute(args=["invalid"]) assert result.success is False assert "Failed to switch model" in result.error + + @pytest.mark.asyncio + async def test_execute_known_subcommand_routing(self, command): + """Test that known subcommands are routed to parent class.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_provider.return_value = "openai" + mock_ctx.model_manager.get_active_model.return_value = "gpt-4" + + with patch( + "mcp_cli.commands.providers.models.ModelListCommand._get_provider_models" + ) as mock_discover: + mock_discover.return_value = ["gpt-4"] + + with patch("chuk_term.ui.output"): + # Test various known subcommand names + for subcmd in ["list", "ls", "set", "show", "current", "status"]: + result = await command.execute(args=[subcmd]) + assert result is not None + + @pytest.mark.asyncio + async def test_execute_no_context(self, command): + """Test execution when context is None.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_get_ctx.return_value = None + + result = await command.execute(args=["gpt-4"]) + + assert result.success is False + assert "No LLM 
manager available" in result.error + + @pytest.mark.asyncio + async def test_execute_no_model_manager(self, command): + """Test execution when model_manager is None.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager = None + + result = await command.execute(args=["gpt-4"]) + + assert result.success is False + assert "No LLM manager available" in result.error + + @pytest.mark.asyncio + async def test_execute_args_as_string(self, command): + """Test execution with args as a string instead of list.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_model.return_value = None + + with patch("chuk_term.ui.output"): + result = await command.execute(args="gpt-4o") + + assert result.success is True + + +class TestModelListCommandExtended: + """Extended tests for ModelListCommand.""" + + @pytest.fixture + def command(self): + """Create a ModelListCommand instance.""" + return ModelListCommand() + + @pytest.mark.asyncio + async def test_list_no_context(self, command): + """Test list command when context is None.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_get_ctx.return_value = None + + result = await command.execute() + + assert result.success is False + assert "No LLM manager available" in result.error + + @pytest.mark.asyncio + async def test_list_no_model_manager(self, command): + """Test list command when model_manager is None.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager = None + + result = await command.execute() + + assert result.success is False + assert "No LLM manager available" in result.error + + @pytest.mark.asyncio + async def test_list_ollama_provider(self, command): + """Test list command with Ollama provider.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = 
mock_get_ctx.return_value + mock_ctx.model_manager.get_active_provider.return_value = "ollama" + mock_ctx.model_manager.get_active_model.return_value = "llama2" + + with patch.object( + command, "_get_ollama_models", return_value=["llama2", "codellama"] + ): + with patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table"): + result = await command.execute() + + assert result.success is True + + @pytest.mark.asyncio + async def test_list_no_models_found(self, command): + """Test list command when no models are discovered.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_provider.return_value = "openai" + mock_ctx.model_manager.get_active_model.return_value = "gpt-4" + + with patch.object(command, "_get_provider_models", return_value=[]): + with patch("chuk_term.ui.output"): + result = await command.execute() + + assert result.success is True + + def test_get_ollama_models_success(self, command): + """Test _get_ollama_models with successful ollama list.""" + with patch("subprocess.run") as mock_run: + mock_run.return_value.returncode = 0 + mock_run.return_value.stdout = "NAME\nllama2\ncodellama\nmistral" + + models = command._get_ollama_models() + + assert models == ["llama2", "codellama", "mistral"] + + def test_get_ollama_models_failure(self, command): + """Test _get_ollama_models when ollama list fails.""" + with patch("subprocess.run") as mock_run: + mock_run.return_value.returncode = 1 + + models = command._get_ollama_models() + + assert models == [] + + def test_get_ollama_models_exception(self, command): + """Test _get_ollama_models when exception occurs.""" + with patch("subprocess.run") as mock_run: + mock_run.side_effect = Exception("Command not found") + + models = command._get_ollama_models() + + assert models == [] + + def test_get_provider_models_success(self, command): + """Test _get_provider_models with successful discovery.""" + with 
patch("chuk_llm.llm.client.list_available_providers") as mock_list: + mock_list.return_value = {"openai": {"models": ["gpt-4", "gpt-3.5-turbo"]}} + + models = command._get_provider_models("openai") + + assert models == ["gpt-4", "gpt-3.5-turbo"] + + def test_get_provider_models_available_models_key(self, command): + """Test _get_provider_models with available_models key.""" + with patch("chuk_llm.llm.client.list_available_providers") as mock_list: + mock_list.return_value = { + "openai": {"available_models": ["gpt-4", "gpt-3.5-turbo"]} + } + + models = command._get_provider_models("openai") + + assert models == ["gpt-4", "gpt-3.5-turbo"] + + def test_get_provider_models_exception(self, command): + """Test _get_provider_models when exception occurs.""" + with patch("chuk_llm.llm.client.list_available_providers") as mock_list: + mock_list.side_effect = Exception("Import error") + + models = command._get_provider_models("openai") + + assert models == [] + + def test_get_provider_models_unknown_provider(self, command): + """Test _get_provider_models with unknown provider.""" + with patch("chuk_llm.llm.client.list_available_providers") as mock_list: + mock_list.return_value = {"openai": {"models": ["gpt-4"]}} + + models = command._get_provider_models("unknown") + + assert models == [] + + def test_get_provider_models_placeholder_with_default(self, command): + """Test _get_provider_models when models is ['*'] but default_model exists.""" + with patch("chuk_llm.llm.client.list_available_providers") as mock_list: + # No API key, so API won't be called - falls back to default_model + mock_list.return_value = { + "deepseek": { + "models": ["*"], + "default_model": "deepseek-chat", + "has_api_key": False, + } + } + + models = command._get_provider_models("deepseek") + + assert models == ["deepseek-chat"] + + def test_get_provider_models_placeholder_no_default(self, command): + """Test _get_provider_models when models is ['*'] and no default_model.""" + with 
patch("chuk_llm.llm.client.list_available_providers") as mock_list: + mock_list.return_value = {"unknown": {"models": ["*"]}} + + models = command._get_provider_models("unknown") + + assert models == [] + + def test_get_provider_models_calls_api_on_placeholder(self, command): + """Test _get_provider_models calls API when models is ['*'] and has API key.""" + with patch("chuk_llm.llm.client.list_available_providers") as mock_list: + mock_list.return_value = { + "deepseek": { + "models": ["*"], + "default_model": "deepseek-chat", + "has_api_key": True, + "api_base": "https://api.deepseek.com/v1", + } + } + # Mock the API call + with patch.object( + command, + "_fetch_models_from_api", + return_value=["deepseek-chat", "deepseek-reasoner"], + ): + models = command._get_provider_models("deepseek") + + assert models == ["deepseek-chat", "deepseek-reasoner"] + + def test_fetch_models_from_api_success(self, command): + """Test _fetch_models_from_api with successful API response.""" + import os + + with patch.dict(os.environ, {"DEEPSEEK_API_KEY": "test-key"}): + with patch("httpx.get") as mock_get: + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = { + "data": [ + {"id": "deepseek-chat"}, + {"id": "deepseek-reasoner"}, + ] + } + mock_get.return_value = mock_response + + models = command._fetch_models_from_api( + "deepseek", "https://api.deepseek.com/v1" + ) + + assert models == ["deepseek-chat", "deepseek-reasoner"] + + def test_fetch_models_from_api_no_api_key(self, command): + """Test _fetch_models_from_api returns empty when no API key.""" + import os + + with patch.dict(os.environ, {}, clear=True): + # Ensure no DEEPSEEK_API_KEY + if "DEEPSEEK_API_KEY" in os.environ: + del os.environ["DEEPSEEK_API_KEY"] + + models = command._fetch_models_from_api( + "deepseek", "https://api.deepseek.com/v1" + ) + + assert models == [] + + def test_fetch_models_from_api_error(self, command): + """Test _fetch_models_from_api handles errors 
gracefully.""" + import os + + with patch.dict(os.environ, {"DEEPSEEK_API_KEY": "test-key"}): + with patch("httpx.get") as mock_get: + mock_get.side_effect = Exception("Network error") + + models = command._fetch_models_from_api( + "deepseek", "https://api.deepseek.com/v1" + ) + + assert models == [] + + def test_fetch_models_from_api_non_200(self, command): + """Test _fetch_models_from_api handles non-200 status.""" + import os + + with patch.dict(os.environ, {"DEEPSEEK_API_KEY": "test-key"}): + with patch("httpx.get") as mock_get: + mock_response = MagicMock() + mock_response.status_code = 401 + mock_get.return_value = mock_response + + models = command._fetch_models_from_api( + "deepseek", "https://api.deepseek.com/v1" + ) + + assert models == [] + + +class TestModelSetCommandExtended: + """Extended tests for ModelSetCommand.""" + + @pytest.fixture + def command(self): + """Create a ModelSetCommand instance.""" + return ModelSetCommand() + + @pytest.mark.asyncio + async def test_set_args_as_string(self, command): + """Test set command with args as string.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_model.return_value = None + + with patch("chuk_term.ui.output"): + result = await command.execute(args="gpt-4o") + + assert result.success is True + + @pytest.mark.asyncio + async def test_set_no_context(self, command): + """Test set command when context is None.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_get_ctx.return_value = None + + result = await command.execute(model_name="gpt-4") + + assert result.success is False + assert "No LLM manager available" in result.error + + @pytest.mark.asyncio + async def test_set_no_model_manager(self, command): + """Test set command when model_manager is None.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager = None + + result = await 
command.execute(model_name="gpt-4") + + assert result.success is False + assert "No LLM manager available" in result.error + + +class TestModelShowCommandExtended: + """Extended tests for ModelShowCommand.""" + + @pytest.fixture + def command(self): + """Create a ModelShowCommand instance.""" + return ModelShowCommand() + + @pytest.mark.asyncio + async def test_show_no_context(self, command): + """Test show command when context is None.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_get_ctx.return_value = None + + result = await command.execute() + + assert result.success is False + assert "No LLM manager available" in result.error + + @pytest.mark.asyncio + async def test_show_no_model_manager(self, command): + """Test show command when model_manager is None.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager = None + + result = await command.execute() + + assert result.success is False + assert "No LLM manager available" in result.error diff --git a/tests/commands/definitions/test_ping_command.py b/tests/commands/definitions/test_ping_command.py index 606c721f..bdf32541 100644 --- a/tests/commands/definitions/test_ping_command.py +++ b/tests/commands/definitions/test_ping_command.py @@ -2,7 +2,7 @@ import pytest from unittest.mock import Mock, patch -from mcp_cli.commands.definitions.ping import PingCommand +from mcp_cli.commands.servers.ping import PingCommand class TestPingCommand: @@ -32,31 +32,43 @@ def test_command_properties(self, command): @pytest.mark.asyncio async def test_execute_all_servers(self, command): """Test pinging all servers.""" - mock_tm = Mock() - - with patch("mcp_cli.commands.actions.ping.ping_action_async") as mock_ping: - mock_ping.return_value = True # Success + from mcp_cli.tools.models import ServerInfo + from unittest.mock import AsyncMock + mock_tm = Mock() + mock_server = ServerInfo( + id=1, + name="test-server", + status="running", 
+ connected=True, + tool_count=5, + namespace="test", + ) + mock_tm.get_server_info = AsyncMock(return_value=[mock_server]) + + with patch("chuk_term.ui.output"): result = await command.execute(tool_manager=mock_tm) - - # Verify ping was called without targets (ping all) - mock_ping.assert_called_once_with(mock_tm, targets=[]) - assert result.success is True @pytest.mark.asyncio async def test_execute_specific_server(self, command): """Test pinging a specific server.""" - mock_tm = Mock() - - with patch("mcp_cli.commands.actions.ping.ping_action_async") as mock_ping: - mock_ping.return_value = True - - result = await command.execute(tool_manager=mock_tm, server_index=1) - - # Verify ping was called with server target - mock_ping.assert_called_once_with(mock_tm, targets=["1"]) + from mcp_cli.tools.models import ServerInfo + from unittest.mock import AsyncMock + mock_tm = Mock() + mock_server = ServerInfo( + id=1, + name="test-server", + status="running", + connected=True, + tool_count=5, + namespace="test", + ) + mock_tm.get_server_info = AsyncMock(return_value=[mock_server]) + + with patch("chuk_term.ui.output"): + result = await command.execute(tool_manager=mock_tm, server_index=0) assert result.success is True @pytest.mark.asyncio @@ -69,33 +81,42 @@ async def test_execute_no_tool_manager(self, command): @pytest.mark.asyncio async def test_execute_failed_ping(self, command): - """Test when ping returns False.""" - mock_tm = Mock() - - with patch("mcp_cli.commands.actions.ping.ping_action_async") as mock_ping: - mock_ping.return_value = False # No servers pinged + """Test when server is disconnected.""" + from mcp_cli.tools.models import ServerInfo + from unittest.mock import AsyncMock + mock_tm = Mock() + mock_server = ServerInfo( + id=1, + name="test-server", + status="stopped", + connected=False, + tool_count=0, + namespace="test", + ) + mock_tm.get_server_info = AsyncMock(return_value=[mock_server]) + + with patch("chuk_term.ui.output"): result = await 
command.execute(tool_manager=mock_tm) - assert result.success is False @pytest.mark.asyncio async def test_execute_error_handling(self, command): """Test error handling during ping.""" - mock_tm = Mock() + from unittest.mock import AsyncMock - with patch("mcp_cli.commands.actions.ping.ping_action_async") as mock_ping: - mock_ping.side_effect = Exception("Network error") + mock_tm = Mock() + mock_tm.get_server_info = AsyncMock(side_effect=Exception("Network error")) + with patch("chuk_term.ui.output"): result = await command.execute(tool_manager=mock_tm) - assert result.success is False assert "Network error" in result.error @pytest.mark.asyncio async def test_execute_with_context_exception(self, command): """Test getting tool manager from context when it throws exception.""" - with patch("mcp_cli.commands.definitions.ping.get_context") as mock_ctx: + with patch("mcp_cli.context.get_context") as mock_ctx: # Make context throw exception mock_ctx.side_effect = Exception("Context error") @@ -108,31 +129,155 @@ async def test_execute_with_context_exception(self, command): @pytest.mark.asyncio async def test_execute_with_args_list(self, command): """Test executing with args as list.""" - mock_tm = Mock() - - with patch("mcp_cli.commands.actions.ping.ping_action_async") as mock_ping: - mock_ping.return_value = True + from mcp_cli.tools.models import ServerInfo + from unittest.mock import AsyncMock + mock_tm = Mock() + mock_server1 = ServerInfo( + id=1, + name="server1", + status="running", + connected=True, + tool_count=5, + namespace="test", + ) + mock_server2 = ServerInfo( + id=2, + name="server2", + status="running", + connected=True, + tool_count=3, + namespace="test", + ) + mock_tm.get_server_info = AsyncMock(return_value=[mock_server1, mock_server2]) + + with patch("chuk_term.ui.output"): # Pass args as a list result = await command.execute( tool_manager=mock_tm, args=["server1", "server2"] ) - - # Should pass targets from args list - 
mock_ping.assert_called_once_with(mock_tm, targets=["server1", "server2"]) assert result.success is True @pytest.mark.asyncio async def test_execute_with_args_string(self, command): """Test executing with args as string.""" - mock_tm = Mock() - - with patch("mcp_cli.commands.actions.ping.ping_action_async") as mock_ping: - mock_ping.return_value = True + from mcp_cli.tools.models import ServerInfo + from unittest.mock import AsyncMock + mock_tm = Mock() + mock_server = ServerInfo( + id=1, + name="server1", + status="running", + connected=True, + tool_count=5, + namespace="test", + ) + mock_tm.get_server_info = AsyncMock(return_value=[mock_server]) + + with patch("chuk_term.ui.output"): # Pass args as a string result = await command.execute(tool_manager=mock_tm, args="server1") + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_no_servers(self, command): + """Test when no servers are available.""" + from unittest.mock import AsyncMock + + mock_tm = Mock() + mock_tm.get_server_info = AsyncMock(return_value=[]) - # Should convert string to list - mock_ping.assert_called_once_with(mock_tm, targets=["server1"]) + with patch("chuk_term.ui.output"): + result = await command.execute(tool_manager=mock_tm) + assert result.success is False + assert "No servers available" in result.output + + @pytest.mark.asyncio + async def test_execute_with_context_success(self, command): + """Test getting tool manager from context successfully.""" + from mcp_cli.tools.models import ServerInfo + from unittest.mock import AsyncMock + + mock_server = ServerInfo( + id=1, + name="test-server", + status="running", + connected=True, + tool_count=5, + namespace="test", + ) + + mock_tm = Mock() + mock_tm.get_server_info = AsyncMock(return_value=[mock_server]) + + mock_ctx = Mock() + mock_ctx.tool_manager = mock_tm + + with patch("mcp_cli.commands.servers.ping.get_context") as mock_get_ctx: + mock_get_ctx.return_value = mock_ctx + with patch("chuk_term.ui.output"): + 
result = await command.execute() + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_filter_by_index(self, command): + """Test filtering servers by index.""" + from mcp_cli.tools.models import ServerInfo + from unittest.mock import AsyncMock + + mock_tm = Mock() + mock_server1 = ServerInfo( + id=1, + name="server1", + status="running", + connected=True, + tool_count=5, + namespace="test", + ) + mock_server2 = ServerInfo( + id=2, + name="server2", + status="running", + connected=True, + tool_count=3, + namespace="test", + ) + mock_tm.get_server_info = AsyncMock(return_value=[mock_server1, mock_server2]) + + with patch("chuk_term.ui.output"): + # Filter by index "0" - should only match first server + result = await command.execute(tool_manager=mock_tm, args=["0"]) assert result.success is True + + @pytest.mark.asyncio + async def test_execute_context_returns_none(self, command): + """Test when context returns None.""" + with patch("mcp_cli.commands.servers.ping.get_context") as mock_get_ctx: + mock_get_ctx.return_value = None + + result = await command.execute() + + assert result.success is False + assert "No active tool manager" in result.error + + @pytest.mark.asyncio + async def test_execute_server_ping_exception(self, command): + """Test when accessing server.connected raises an exception.""" + from unittest.mock import AsyncMock, PropertyMock + + mock_tm = Mock() + + # Create a mock server that raises exception when connected is accessed + mock_server = Mock() + mock_server.name = "test-server" + type(mock_server).connected = PropertyMock( + side_effect=Exception("Connection check failed") + ) + + mock_tm.get_server_info = AsyncMock(return_value=[mock_server]) + + with patch("chuk_term.ui.output"): + result = await command.execute(tool_manager=mock_tm) + # Should fail because the server ping raised an exception + assert result.success is False diff --git a/tests/commands/definitions/test_prompts_command.py 
b/tests/commands/definitions/test_prompts_command.py index 091d9c66..edec0957 100644 --- a/tests/commands/definitions/test_prompts_command.py +++ b/tests/commands/definitions/test_prompts_command.py @@ -2,7 +2,7 @@ import pytest from unittest.mock import patch -from mcp_cli.commands.definitions.prompts import PromptsCommand +from mcp_cli.commands.resources.prompts import PromptsCommand class TestPromptsCommand: @@ -28,113 +28,89 @@ def test_command_properties(self, command): @pytest.mark.asyncio async def test_execute_list_all(self, command): """Test listing all prompts.""" - with patch( - "mcp_cli.commands.actions.prompts.prompts_action_async" - ) as mock_action: - mock_action.return_value = { - "prompts": [ - { - "name": "summarize", - "server": "text-processor", - "description": "Summarize text", - "arguments": ["text", "max_length"], - }, - { - "name": "translate", - "server": "translator", - "description": "Translate text", - "arguments": ["text", "target_language"], - }, - ] - } - - result = await command.execute() - - mock_action.assert_called_once_with() - - assert result.success is True + from unittest.mock import AsyncMock, MagicMock + + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + # Create mock prompt objects + mock_prompt1 = MagicMock() + mock_prompt1.name = "summarize" + mock_prompt1.description = "Summarize text" + mock_prompt2 = MagicMock() + mock_prompt2.name = "translate" + mock_prompt2.description = "Translate text" + mock_prompts = [mock_prompt1, mock_prompt2] + mock_ctx.tool_manager.list_prompts = AsyncMock(return_value=mock_prompts) + with patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table"): + result = await command.execute() + + assert result.success is True @pytest.mark.asyncio async def test_execute_by_server(self, command): """Test listing prompts for a specific server.""" - with patch( - "mcp_cli.commands.actions.prompts.prompts_action_async" - ) as mock_action: - 
mock_action.return_value = { - "prompts": [ - { - "name": "summarize", - "server": "text-processor", - "description": "Summarize text", - } - ] - } - - result = await command.execute(server=0) # server parameter is an index - - mock_action.assert_called_once_with() - - assert result.success is True + from unittest.mock import AsyncMock, MagicMock + + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_prompt = MagicMock() + mock_prompt.name = "summarize" + mock_prompt.description = "Summarize text" + mock_prompts = [mock_prompt] + mock_ctx.tool_manager.list_prompts = AsyncMock(return_value=mock_prompts) + with patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table"): + result = await command.execute( + server=0 + ) # server parameter is an index + + assert result.success is True @pytest.mark.asyncio async def test_execute_detailed(self, command): """Test listing prompts with detailed information.""" - with patch( - "mcp_cli.commands.actions.prompts.prompts_action_async" - ) as mock_action: - mock_action.return_value = { - "prompts": [ - { - "name": "summarize", - "server": "text-processor", - "description": "Summarize text", - "arguments": [ - { - "name": "text", - "type": "string", - "required": True, - "description": "Text to summarize", - }, - { - "name": "max_length", - "type": "integer", - "required": False, - "default": 100, - "description": "Maximum summary length", - }, - ], - } - ] - } - - result = await command.execute(raw=True) - - mock_action.assert_called_once_with() - - assert result.success is True + from unittest.mock import AsyncMock, MagicMock + + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_prompt = MagicMock() + mock_prompt.name = "summarize" + mock_prompt.description = "Summarize text" + mock_prompts = [mock_prompt] + mock_ctx.tool_manager.list_prompts = AsyncMock(return_value=mock_prompts) + with 
patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table"): + result = await command.execute(raw=True) + + assert result.success is True @pytest.mark.asyncio async def test_execute_no_prompts(self, command): """Test when no prompts are available.""" - with patch( - "mcp_cli.commands.actions.prompts.prompts_action_async" - ) as mock_action: - mock_action.return_value = {"prompts": []} + from unittest.mock import AsyncMock - result = await command.execute() + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.list_prompts = AsyncMock(return_value=[]) + with patch("chuk_term.ui.output"): + result = await command.execute() - assert result.success is True - # Should indicate no prompts available + assert result.success is True + # Should indicate no prompts available @pytest.mark.asyncio async def test_execute_error_handling(self, command): """Test error handling during execution.""" - with patch( - "mcp_cli.commands.actions.prompts.prompts_action_async" - ) as mock_action: - mock_action.side_effect = Exception("Server error") - - result = await command.execute() - - assert result.success is False - assert "Server error" in result.error or result.output + from unittest.mock import AsyncMock + + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.list_prompts = AsyncMock( + side_effect=Exception("Server error") + ) + with patch("chuk_term.ui.output"): + result = await command.execute() + + assert result.success is False + assert "Server error" in result.error diff --git a/tests/commands/definitions/test_provider_singular_command.py b/tests/commands/definitions/test_provider_singular_command.py index 01d85a06..2cd4c519 100644 --- a/tests/commands/definitions/test_provider_singular_command.py +++ b/tests/commands/definitions/test_provider_singular_command.py @@ -3,7 +3,7 @@ import pytest from unittest.mock import patch 
-from mcp_cli.commands.definitions.provider_singular import ProviderSingularCommand +from mcp_cli.commands.providers.provider_singular import ProviderSingularCommand @pytest.fixture @@ -27,172 +27,139 @@ def test_provider_command_properties(provider_command): @pytest.mark.asyncio async def test_provider_show_status_no_args(provider_command): """Test showing current provider status with no arguments.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.return_value = None + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_provider.return_value = "openai" + mock_ctx.model_manager.get_active_model.return_value = "gpt-4" + with patch("chuk_term.ui.output"): + result = await provider_command.execute(args=[]) - result = await provider_command.execute(args=[]) - - assert result.success is True - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == [] + assert result.success is True + mock_ctx.model_manager.get_active_provider.assert_called_once() @pytest.mark.asyncio async def test_provider_show_status_error(provider_command): """Test error handling when showing provider status fails.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.side_effect = Exception("Connection failed") + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_provider.side_effect = Exception( + "Connection failed" + ) + with patch("chuk_term.ui.output"): + result = await provider_command.execute(args=[]) - result = await provider_command.execute(args=[]) - - assert result.success is False - assert "Failed to show provider status: Connection failed" in result.error + assert result.success is False + assert "Failed to show provider status: Connection failed" in 
result.error @pytest.mark.asyncio async def test_provider_switch_to_provider(provider_command): """Test switching to a different provider.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.return_value = None - - result = await provider_command.execute(args=["ollama"]) + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_provider.return_value = None + with patch("chuk_term.ui.output"): + result = await provider_command.execute(args=["ollama"]) - assert result.success is True - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == ["ollama"] + assert result.success is True + mock_ctx.model_manager.switch_provider.assert_called_once_with("ollama") @pytest.mark.asyncio async def test_provider_switch_error(provider_command): """Test error handling when switching provider fails.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.side_effect = Exception("Invalid provider") - - result = await provider_command.execute(args=["invalid"]) + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_provider.side_effect = Exception( + "Invalid provider" + ) + with patch("chuk_term.ui.output"): + result = await provider_command.execute(args=["invalid"]) - assert result.success is False - assert "Failed to switch provider: Invalid provider" in result.error + assert result.success is False + assert "Failed to switch provider: Invalid provider" in result.error @pytest.mark.asyncio async def test_provider_list_subcommand(provider_command): """Test handling of list subcommand.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.return_value = None - + with patch("mcp_cli.context.get_context"): result = await 
provider_command.execute(args=["list"]) - assert result.success is True - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == ["list"] + assert result.success is False + assert "Use /providers list" in result.error @pytest.mark.asyncio async def test_provider_ls_alias(provider_command): """Test handling of ls alias for list.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.return_value = None - + with patch("mcp_cli.context.get_context"): result = await provider_command.execute(args=["ls"]) - assert result.success is True - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == ["ls"] + assert result.success is False + assert "Use /providers ls" in result.error @pytest.mark.asyncio async def test_provider_set_subcommand(provider_command): """Test handling of set subcommand.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.return_value = None - + with patch("mcp_cli.context.get_context"): result = await provider_command.execute( args=["set", "openai", "api_key", "test-key"] ) - assert result.success is True - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == ["set", "openai", "api_key", "test-key"] + assert result.success is False + assert "Use /providers set" in result.error @pytest.mark.asyncio async def test_provider_subcommand_error(provider_command): """Test error handling for subcommand failures.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.side_effect = Exception("Set command failed") - + with patch("mcp_cli.context.get_context"): result = await provider_command.execute(args=["set", "invalid"]) assert result.success is False - assert "Command failed: Set command failed" in result.error + assert "Use /providers set" in 
result.error @pytest.mark.asyncio async def test_provider_with_string_args(provider_command): """Test handling of string arguments instead of list.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.return_value = None + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_provider.return_value = None + with patch("chuk_term.ui.output"): + # Test with string argument + result = await provider_command.execute(args="ollama") - # Test with string argument - result = await provider_command.execute(args="ollama") - - assert result.success is True - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == ["ollama"] + assert result.success is True + mock_ctx.model_manager.switch_provider.assert_called_once_with("ollama") @pytest.mark.asyncio async def test_provider_with_multiple_args(provider_command): """Test handling of multiple arguments.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.return_value = None - - result = await provider_command.execute(args=["openai", "gpt-4"]) + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_provider.return_value = None + with patch("chuk_term.ui.output"): + result = await provider_command.execute(args=["openai", "gpt-4"]) - assert result.success is True - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == ["openai", "gpt-4"] + assert result.success is True + mock_ctx.model_manager.switch_provider.assert_called_once_with("openai") @pytest.mark.asyncio async def test_provider_with_no_kwargs(provider_command): """Test handling when no kwargs provided.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - 
mock_action.return_value = None - - # No args key in kwargs - result = await provider_command.execute() - - assert result.success is True - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == [] # Should default to empty list + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_provider.return_value = "openai" + mock_ctx.model_manager.get_active_model.return_value = "gpt-4" + with patch("chuk_term.ui.output"): + # No args key in kwargs + result = await provider_command.execute() + + assert result.success is True + mock_ctx.model_manager.get_active_provider.assert_called_once() diff --git a/tests/commands/definitions/test_providers_command.py b/tests/commands/definitions/test_providers_command.py index 796522e2..71dc6773 100644 --- a/tests/commands/definitions/test_providers_command.py +++ b/tests/commands/definitions/test_providers_command.py @@ -2,7 +2,7 @@ import pytest from unittest.mock import patch -from mcp_cli.commands.definitions.providers import ProviderCommand +from mcp_cli.commands.providers.providers import ProviderCommand from mcp_cli.commands.base import CommandGroup @@ -29,18 +29,20 @@ def test_command_properties(self, command): @pytest.mark.asyncio async def test_execute_no_subcommand(self, command): """Test executing providers without a subcommand.""" - # When no subcommand is provided, it should use the default (list) - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.return_value = { - "providers": [{"name": "openai", "status": "configured"}] - } - - result = await command.execute() - - # Should default to list subcommand - assert result.success is True + # When no subcommand is provided, it should list providers + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + 
mock_ctx.model_manager.get_available_providers.return_value = [ + "openai", + "anthropic", + ] + mock_ctx.model_manager.get_active_provider.return_value = "openai" + with patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table"): + result = await command.execute() + + # Should default to list + assert result.success is True @pytest.mark.asyncio async def test_execute_list_subcommand(self, command): @@ -49,46 +51,51 @@ async def test_execute_list_subcommand(self, command): list_cmd = command.subcommands.get("list") assert list_cmd is not None - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.return_value = { - "providers": [{"name": "ollama", "status": "active"}] - } + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_provider.return_value = "ollama" - result = await list_cmd.execute() + # Mock provider discovery through chuk_llm + with patch("chuk_llm.llm.client.list_available_providers") as mock_list: + mock_list.return_value = {"ollama": {"has_api_key": True, "models": []}} - assert result.success is True - mock_action.assert_called_once() + with patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table"): + result = await list_cmd.execute() + + assert result.success is True + mock_ctx.model_manager.get_active_provider.assert_called_once() @pytest.mark.asyncio async def test_execute_invalid_subcommand(self, command): """Test executing with an invalid subcommand.""" # The ProviderCommand treats unknown subcommands as provider names # So we need to test with args that would be an invalid provider - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value # Simulate the action failing for an invalid provider - mock_action.side_effect = Exception("Provider not found: 
invalid") + mock_ctx.model_manager.switch_provider.side_effect = Exception( + "Provider not found: invalid" + ) + with patch("chuk_term.ui.output"): + result = await command.execute(args=["invalid"]) - result = await command.execute(args=["invalid"]) - - assert result.success is False - assert "Failed to switch provider" in result.error + assert result.success is False + assert "Failed to switch provider" in result.error @pytest.mark.asyncio async def test_execute_no_args_error(self, command): - """Test error handling when listing with no args fails.""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.side_effect = Exception("Connection failed") - - result = await command.execute(args=[]) - - assert result.success is False - assert "Failed to list providers" in result.error + """Test error handling when list subcommand fails.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_provider.side_effect = Exception( + "Connection failed" + ) + with patch("chuk_term.ui.output"): + result = await command.execute(args=[]) + + assert result.success is False + assert "Failed to list providers" in result.error @pytest.mark.asyncio async def test_execute_set_subcommand(self, command): @@ -96,28 +103,30 @@ async def test_execute_set_subcommand(self, command): set_cmd = command.subcommands.get("set") assert set_cmd is not None - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - result = await set_cmd.execute(args=["ollama"]) + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_provider.return_value = None + with patch("chuk_term.ui.output"): + result = await set_cmd.execute(args=["ollama"]) - assert result.success is True - mock_action.assert_called_once() + assert result.success is True + 
mock_ctx.model_manager.switch_provider.assert_called_once() @pytest.mark.asyncio async def test_execute_set_error(self, command): """Test error handling in set subcommand.""" set_cmd = command.subcommands.get("set") - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.side_effect = Exception("Invalid provider") + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_provider.side_effect = Exception( + "Invalid provider" + ) + with patch("chuk_term.ui.output"): + result = await set_cmd.execute(args=["invalid"]) - result = await set_cmd.execute(args=["invalid"]) - - assert result.success is False - assert "Failed to set provider" in result.error + assert result.success is False + assert "Failed to set provider" in result.error @pytest.mark.asyncio async def test_execute_show_subcommand(self, command): @@ -125,56 +134,261 @@ async def test_execute_show_subcommand(self, command): show_cmd = command.subcommands.get("show") assert show_cmd is not None - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - result = await show_cmd.execute() + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_provider.return_value = "openai" + mock_ctx.model_manager.get_active_model.return_value = "gpt-4" + with patch("chuk_term.ui.output"): + result = await show_cmd.execute() - assert result.success is True - mock_action.assert_called_once() + assert result.success is True + mock_ctx.model_manager.get_active_provider.assert_called_once() @pytest.mark.asyncio async def test_execute_show_error(self, command): """Test error handling in show subcommand.""" show_cmd = command.subcommands.get("show") - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - mock_action.side_effect = Exception("Failed to 
get info") - - result = await show_cmd.execute() + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_active_provider.side_effect = Exception( + "Failed to get info" + ) + with patch("chuk_term.ui.output"): + result = await show_cmd.execute() - assert result.success is False - assert "Failed to get provider info" in result.error + assert result.success is False + assert "Failed to get provider info" in result.error @pytest.mark.asyncio async def test_execute_with_known_subcommand(self, command): """Test that known subcommands are routed to parent.""" - with patch("mcp_cli.commands.actions.providers.provider_action_async"): - # Test various known subcommand aliases - for subcmd in [ - "list", - "ls", - "set", - "use", - "switch", - "show", - "current", - "status", - ]: - result = await command.execute(args=[subcmd]) - # Should be handled by subcommand routing - assert result is not None + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.get_available_providers.return_value = ["openai"] + mock_ctx.model_manager.get_active_provider.return_value = "openai" + mock_ctx.model_manager.get_active_model.return_value = "gpt-4" + mock_ctx.model_manager.switch_provider.return_value = None + with patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table"): + # Test various known subcommand aliases + for subcmd in [ + "list", + "ls", + "set", + "use", + "switch", + "show", + "current", + "status", + ]: + result = await command.execute(args=[subcmd]) + # Should be handled by subcommand routing + assert result is not None @pytest.mark.asyncio async def test_execute_provider_name_directly(self, command): """Test passing provider name directly (not a subcommand).""" - with patch( - "mcp_cli.commands.actions.providers.provider_action_async" - ) as mock_action: - result = await command.execute(args=["ollama"]) + with 
patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_provider.return_value = None + with patch("chuk_term.ui.output"): + result = await command.execute(args=["ollama"]) + + assert result.success is True + # Should treat "ollama" as a provider name to switch to + mock_ctx.model_manager.switch_provider.assert_called_once_with("ollama") + + +class TestProviderListCommandExtended: + """Extended tests for ProviderListCommand.""" + + @pytest.fixture + def command(self): + """Create a ProviderListCommand instance.""" + from mcp_cli.commands.providers.providers import ProviderListCommand + + return ProviderListCommand() + + @pytest.mark.asyncio + async def test_list_no_context(self, command): + """Test list command when context is None.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_get_ctx.return_value = None + + result = await command.execute() + + assert result.success is False + assert "No LLM manager available" in result.error + + @pytest.mark.asyncio + async def test_list_no_model_manager(self, command): + """Test list command when model_manager is None.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager = None + + result = await command.execute() + + assert result.success is False + assert "No LLM manager available" in result.error + + def test_get_provider_status_ollama_running(self, command): + """Test _get_provider_status for running Ollama.""" + from mcp_cli.commands.models.provider import ProviderData + + provider = ProviderData(name="ollama", has_api_key=False, models=[]) + + with patch("subprocess.run") as mock_run: + mock_run.return_value.returncode = 0 + mock_run.return_value.stdout = "NAME\nllama2\ncodellama" + + status = command._get_provider_status(provider) + + assert status.icon == "✅" + assert "Running" in status.text + assert "2 models" in status.text + + def 
test_get_provider_status_ollama_not_running(self, command): + """Test _get_provider_status for non-running Ollama.""" + from mcp_cli.commands.models.provider import ProviderData + + provider = ProviderData(name="ollama", has_api_key=False, models=[]) + + with patch("subprocess.run") as mock_run: + mock_run.return_value.returncode = 1 + + status = command._get_provider_status(provider) + + assert status.icon == "❌" + assert "Not running" in status.text + + def test_get_provider_status_ollama_exception(self, command): + """Test _get_provider_status when Ollama command fails.""" + from mcp_cli.commands.models.provider import ProviderData + + provider = ProviderData(name="ollama", has_api_key=False, models=[]) + + with patch("subprocess.run") as mock_run: + mock_run.side_effect = Exception("Command not found") + + status = command._get_provider_status(provider) + + assert status.icon == "❌" + assert "Not available" in status.text + + def test_get_provider_status_with_api_key_and_models(self, command): + """Test _get_provider_status for provider with API key and models.""" + from mcp_cli.commands.models.provider import ProviderData + + provider = ProviderData( + name="openai", has_api_key=True, models=["gpt-4", "gpt-3.5-turbo"] + ) + + status = command._get_provider_status(provider) + + assert status.icon == "✅" + assert "Configured" in status.text + assert "2 models" in status.text + + def test_get_provider_status_with_api_key_no_models(self, command): + """Test _get_provider_status for provider with API key but no models.""" + from mcp_cli.commands.models.provider import ProviderData + + provider = ProviderData(name="openai", has_api_key=True, models=[]) + + status = command._get_provider_status(provider) + + assert status.icon == "⚠️" + assert "API key set" in status.text + + def test_get_provider_status_no_api_key(self, command): + """Test _get_provider_status for provider without API key.""" + from mcp_cli.commands.models.provider import ProviderData + + provider = 
ProviderData(name="openai", has_api_key=False, models=[]) + + status = command._get_provider_status(provider) + + assert status.icon == "❌" + assert "No API key" in status.text + + +class TestProviderSetCommandExtended: + """Extended tests for ProviderSetCommand.""" + + @pytest.fixture + def command(self): + """Create a ProviderSetCommand instance.""" + from mcp_cli.commands.providers.providers import ProviderSetCommand + + return ProviderSetCommand() + + @pytest.mark.asyncio + async def test_set_no_context(self, command): + """Test set command when context is None.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_get_ctx.return_value = None + + result = await command.execute(provider_name="openai") + + assert result.success is False + assert "No LLM manager available" in result.error + + @pytest.mark.asyncio + async def test_set_no_model_manager(self, command): + """Test set command when model_manager is None.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager = None + + result = await command.execute(provider_name="openai") + + assert result.success is False + assert "No LLM manager available" in result.error + + @pytest.mark.asyncio + async def test_set_args_as_string(self, command): + """Test set command with args as string.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager.switch_provider.return_value = None + + with patch("chuk_term.ui.output"): + result = await command.execute(args="openai") assert result.success is True - # Should treat "ollama" as a provider name to switch to - mock_action.assert_called_once() + + +class TestProviderShowCommandExtended: + """Extended tests for ProviderShowCommand.""" + + @pytest.fixture + def command(self): + """Create a ProviderShowCommand instance.""" + from mcp_cli.commands.providers.providers import ProviderShowCommand + + return 
ProviderShowCommand() + + @pytest.mark.asyncio + async def test_show_no_context(self, command): + """Test show command when context is None.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_get_ctx.return_value = None + + result = await command.execute() + + assert result.success is False + assert "No LLM manager available" in result.error + + @pytest.mark.asyncio + async def test_show_no_model_manager(self, command): + """Test show command when model_manager is None.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.model_manager = None + + result = await command.execute() + + assert result.success is False + assert "No LLM manager available" in result.error diff --git a/tests/commands/definitions/test_resources_command.py b/tests/commands/definitions/test_resources_command.py index 0f41aa68..2fd32f6e 100644 --- a/tests/commands/definitions/test_resources_command.py +++ b/tests/commands/definitions/test_resources_command.py @@ -1,8 +1,8 @@ """Tests for the resources command.""" import pytest -from unittest.mock import patch -from mcp_cli.commands.definitions.resources import ResourcesCommand +from unittest.mock import patch, AsyncMock +from mcp_cli.commands.resources.resources import ResourcesCommand class TestResourcesCommand: @@ -28,96 +28,101 @@ def test_command_properties(self, command): @pytest.mark.asyncio async def test_execute_list_all(self, command): """Test listing all resources.""" - with patch( - "mcp_cli.commands.actions.resources.resources_action_async" - ) as mock_action: - mock_action.return_value = { - "resources": [ - { - "name": "database.db", - "server": "sqlite", - "type": "database", - "description": "SQLite database", - }, - { - "name": "config.json", - "server": "filesystem", - "type": "file", - "description": "Configuration file", - }, - ] - } - - result = await command.execute() - - mock_action.assert_called_once_with() - - assert result.success is True 
+ from mcp_cli.tools.models import ResourceInfo + + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_resources = [ + ResourceInfo( + id="file:///database.db", + name="database.db", + type="application/x-sqlite3", + ), + ResourceInfo( + id="file:///config.json", + name="config.json", + type="application/json", + ), + ] + mock_ctx.tool_manager.list_resources = AsyncMock( + return_value=mock_resources + ) + with patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table"): + result = await command.execute() + + assert result.success is True @pytest.mark.asyncio async def test_execute_by_server(self, command): """Test listing resources for a specific server.""" - with patch( - "mcp_cli.commands.actions.resources.resources_action_async" - ) as mock_action: - mock_action.return_value = { - "resources": [ - {"name": "database.db", "server": "sqlite", "type": "database"} - ] - } - - result = await command.execute(server=0) # server parameter is an index - - mock_action.assert_called_once_with() - - assert result.success is True + from mcp_cli.tools.models import ResourceInfo + + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_resources = [ + ResourceInfo( + id="file:///database.db", + name="database.db", + type="application/x-sqlite3", + ), + ] + mock_ctx.tool_manager.list_resources = AsyncMock( + return_value=mock_resources + ) + with patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table"): + result = await command.execute( + server=0 + ) # server parameter is an index + + assert result.success is True @pytest.mark.asyncio async def test_execute_detailed(self, command): """Test listing resources with detailed information.""" - with patch( - "mcp_cli.commands.actions.resources.resources_action_async" - ) as mock_action: - mock_action.return_value = { - "resources": [ - { - "name": "database.db", - "server": "sqlite", - "type": 
"database", - "description": "SQLite database", - "metadata": {"size": "1024KB", "tables": 10}, - } - ] - } - - result = await command.execute(raw=True) - - mock_action.assert_called_once_with() - - assert result.success is True + from mcp_cli.tools.models import ResourceInfo + + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_resources = [ + ResourceInfo( + id="file:///database.db", + name="database.db", + type="application/x-sqlite3", + ), + ] + mock_ctx.tool_manager.list_resources = AsyncMock( + return_value=mock_resources + ) + with patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table"): + result = await command.execute(raw=True) + + assert result.success is True @pytest.mark.asyncio async def test_execute_no_resources(self, command): """Test when no resources are available.""" - with patch( - "mcp_cli.commands.actions.resources.resources_action_async" - ) as mock_action: - mock_action.return_value = {"resources": []} + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.list_resources = AsyncMock(return_value=[]) + with patch("chuk_term.ui.output"): + result = await command.execute() - result = await command.execute() - - assert result.success is True - # Should indicate no resources available + assert result.success is True + # Should indicate no resources available @pytest.mark.asyncio async def test_execute_error_handling(self, command): """Test error handling during execution.""" - with patch( - "mcp_cli.commands.actions.resources.resources_action_async" - ) as mock_action: - mock_action.side_effect = Exception("Server not connected") - - result = await command.execute() - - assert result.success is False - assert "Server not connected" in result.error or result.output + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.list_resources = 
AsyncMock( + side_effect=Exception("Server not connected") + ) + with patch("chuk_term.ui.output"): + result = await command.execute() + + assert result.success is False + assert "Server not connected" in result.error diff --git a/tests/commands/definitions/test_server_singular.py b/tests/commands/definitions/test_server_singular.py index 0236a2e1..b79ddce0 100644 --- a/tests/commands/definitions/test_server_singular.py +++ b/tests/commands/definitions/test_server_singular.py @@ -1,8 +1,8 @@ """Tests for the server singular command.""" import pytest -from unittest.mock import patch -from mcp_cli.commands.definitions.server_singular import ServerSingularCommand +from unittest.mock import patch, AsyncMock +from mcp_cli.commands.servers.server_singular import ServerSingularCommand class TestServerSingularCommand: @@ -23,23 +23,265 @@ def test_command_properties(self, command): @pytest.mark.asyncio async def test_execute_no_args(self, command): """Test executing server command without args.""" - with patch( - "mcp_cli.commands.actions.servers.servers_action_async" - ) as mock_action: - mock_action.return_value = [] - result = await command.execute(args=[]) - assert result.success is True - mock_action.assert_called_once() + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock(return_value=[]) + with patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table"): + result = await command.execute(args=[]) + assert result.success is True @pytest.mark.asyncio async def test_execute_with_server_name(self, command): """Test executing server command with server name.""" - with patch( - "mcp_cli.commands.actions.servers.servers_action_async" - ) as mock_action: - mock_action.return_value = [] - result = await command.execute(args=["test-server"]) - assert result.success is True - # Should pass server name as args - call_args = mock_action.call_args[0][0] - assert 
call_args.args == ["test-server"] + from mcp_cli.tools.models import ServerInfo + + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_server = ServerInfo( + id=1, + name="test-server", + status="running", + connected=True, + tool_count=5, + namespace="test", + ) + mock_ctx.tool_manager.get_server_info = AsyncMock( + return_value=[mock_server] + ) + with patch("chuk_term.ui.output"): + result = await command.execute(args=["test-server"]) + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_no_context(self, command): + """Test executing when no context is available.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_get_ctx.return_value = None + + result = await command.execute(args=[]) + + assert result.success is False + assert "No tool manager available" in result.error + + @pytest.mark.asyncio + async def test_execute_no_tool_manager(self, command): + """Test executing when context has no tool manager.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager = None + + result = await command.execute(args=[]) + + assert result.success is False + assert "No tool manager available" in result.error + + @pytest.mark.asyncio + async def test_execute_args_as_string(self, command): + """Test executing with args as string instead of list.""" + from mcp_cli.tools.models import ServerInfo + + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_server = ServerInfo( + id=1, + name="test-server", + status="running", + connected=True, + tool_count=5, + namespace="test", + ) + mock_ctx.tool_manager.get_server_info = AsyncMock( + return_value=[mock_server] + ) + with patch("chuk_term.ui.output"): + result = await command.execute(args="test-server") + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_with_servers(self, command): 
+ """Test listing servers when servers exist.""" + from mcp_cli.tools.models import ServerInfo + + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_server = ServerInfo( + id=1, + name="test-server", + status="running", + connected=True, + tool_count=5, + namespace="test", + ) + mock_ctx.tool_manager.get_server_info = AsyncMock( + return_value=[mock_server] + ) + with patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table") as mock_format: + mock_format.return_value = "table" + result = await command.execute(args=[]) + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_list_command(self, command): + """Test 'list' subcommand.""" + from mcp_cli.tools.models import ServerInfo + + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_server = ServerInfo( + id=1, + name="test-server", + status="running", + connected=True, + tool_count=5, + namespace="test", + ) + mock_ctx.tool_manager.get_server_info = AsyncMock( + return_value=[mock_server] + ) + with patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table") as mock_format: + mock_format.return_value = "table" + result = await command.execute(args=["list"]) + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_list_no_servers(self, command): + """Test 'list' subcommand with no servers.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock(return_value=[]) + with patch("chuk_term.ui.output"): + result = await command.execute(args=["list"]) + assert result.success is True + assert "No servers connected" in result.output + + @pytest.mark.asyncio + async def test_execute_add_command(self, command): + """Test 'add' subcommand (not implemented).""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = 
mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock(return_value=[]) + with patch("chuk_term.ui.output"): + result = await command.execute(args=["add", "myserver"]) + assert result.success is False + assert "not yet implemented" in result.error + + @pytest.mark.asyncio + async def test_execute_remove_command(self, command): + """Test 'remove' subcommand (not implemented).""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock(return_value=[]) + with patch("chuk_term.ui.output"): + result = await command.execute(args=["remove", "myserver"]) + assert result.success is False + assert "not yet implemented" in result.error + + @pytest.mark.asyncio + async def test_execute_enable_command(self, command): + """Test 'enable' subcommand (not implemented).""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock(return_value=[]) + with patch("chuk_term.ui.output"): + result = await command.execute(args=["enable", "myserver"]) + assert result.success is False + assert "not yet implemented" in result.error + + @pytest.mark.asyncio + async def test_execute_disable_command(self, command): + """Test 'disable' subcommand (not implemented).""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock(return_value=[]) + with patch("chuk_term.ui.output"): + result = await command.execute(args=["disable", "myserver"]) + assert result.success is False + assert "not yet implemented" in result.error + + @pytest.mark.asyncio + async def test_execute_ping_command(self, command): + """Test 'ping' subcommand (not implemented).""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = 
AsyncMock(return_value=[]) + with patch("chuk_term.ui.output"): + result = await command.execute(args=["ping", "myserver"]) + assert result.success is False + assert "not yet implemented" in result.error + + @pytest.mark.asyncio + async def test_execute_server_not_found(self, command): + """Test when specified server is not found.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock(return_value=[]) + with patch("chuk_term.ui.output"): + result = await command.execute(args=["nonexistent-server"]) + assert result.success is False + assert "not found" in result.error + + @pytest.mark.asyncio + async def test_execute_list_exception(self, command): + """Test list command when exception occurs.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock( + side_effect=Exception("Database error") + ) + with patch("chuk_term.ui.output"): + result = await command.execute(args=[]) + assert result.success is False + assert "Failed to list servers" in result.error + + @pytest.mark.asyncio + async def test_execute_list_subcommand_exception(self, command): + """Test 'list' subcommand when exception occurs.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock( + side_effect=Exception("Database error") + ) + with patch("chuk_term.ui.output"): + result = await command.execute(args=["list"]) + assert result.success is False + assert "Failed to list servers" in result.error + + @pytest.mark.asyncio + async def test_execute_server_details_exception(self, command): + """Test server details when exception occurs.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock( + 
side_effect=Exception("API error") + ) + with patch("chuk_term.ui.output"): + result = await command.execute(args=["some-server"]) + assert result.success is False + assert "Failed to get server details" in result.error + + @pytest.mark.asyncio + async def test_execute_disconnected_server(self, command): + """Test listing with a disconnected server.""" + from mcp_cli.tools.models import ServerInfo + + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_server = ServerInfo( + id=1, + name="test-server", + status="stopped", + connected=False, + tool_count=0, + namespace="test", + ) + mock_ctx.tool_manager.get_server_info = AsyncMock( + return_value=[mock_server] + ) + with patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table") as mock_format: + mock_format.return_value = "table" + result = await command.execute(args=[]) + assert result.success is True diff --git a/tests/commands/definitions/test_servers_command.py b/tests/commands/definitions/test_servers_command.py index 1fa9caea..2eada9ee 100644 --- a/tests/commands/definitions/test_servers_command.py +++ b/tests/commands/definitions/test_servers_command.py @@ -1,8 +1,30 @@ """Tests for the servers command.""" import pytest -from unittest.mock import patch -from mcp_cli.commands.definitions.servers import ServersCommand +from unittest.mock import patch, MagicMock, AsyncMock +from mcp_cli.commands.servers.servers import ServersCommand +from mcp_cli.tools.models import ServerInfo, TransportType + + +def create_server_info( + name: str, + status: str = "connected", + tool_count: int = 0, + transport: TransportType = TransportType.STDIO, + connected: bool = True, +) -> ServerInfo: + """Create a ServerInfo instance for testing.""" + return ServerInfo( + id=0, + name=name, + status=status, + tool_count=tool_count, + namespace=name, + enabled=True, + connected=connected, + transport=transport, + capabilities={}, + ) class TestServersCommand: @@ -29,101 
+51,67 @@ def test_command_properties(self, command): @pytest.mark.asyncio async def test_execute_basic(self, command): """Test basic execution without parameters.""" - with patch( - "mcp_cli.commands.actions.servers.servers_action_async" - ) as mock_action: - # Mock the action to return server data - mock_action.return_value = { - "servers": [ - { - "name": "test-server", - "status": "connected", - "tools": 5, - "resources": 2, - } - ] - } + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock( + return_value=[create_server_info("test-server", "connected", 5)] + ) - result = await command.execute() - - # Verify the action was called with ServerActionParams - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.args == [] - assert not call_args.detailed - assert not call_args.show_capabilities - assert not call_args.show_transport - assert call_args.output_format == "table" - assert not call_args.ping_servers + with patch("chuk_term.ui.output"): + result = await command.execute() # Check result assert result.success is True + mock_ctx.tool_manager.get_server_info.assert_called_once() @pytest.mark.asyncio async def test_execute_detailed(self, command): """Test execution with detailed flag.""" - with patch( - "mcp_cli.commands.actions.servers.servers_action_async" - ) as mock_action: - mock_action.return_value = { - "servers": [ - { - "name": "test-server", - "status": "connected", - "tools": 5, - "resources": 2, - "capabilities": ["tools", "resources"], - } - ] - } - - result = await command.execute(detailed=True) - - # Verify the action was called with detailed=True - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.detailed - # Note: show_capabilities is controlled by 'raw' not 'detailed' + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = 
mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock( + return_value=[create_server_info("test-server", "connected", 5)] + ) + + with patch("chuk_term.ui.output"): + result = await command.execute(detailed=True) assert result.success is True + mock_ctx.tool_manager.get_server_info.assert_called_once() @pytest.mark.asyncio async def test_execute_with_format(self, command): """Test execution with different output formats.""" - with patch( - "mcp_cli.commands.actions.servers.servers_action_async" - ) as mock_action: - mock_action.return_value = {"servers": []} + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock(return_value=[]) - # Test with json format - result = await command.execute(format="json") - - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.output_format == "json" + with patch("chuk_term.ui.output"): + # Test with json format + result = await command.execute(format="json") assert result.success is True @pytest.mark.asyncio async def test_execute_error_handling(self, command): """Test error handling during execution.""" - with patch( - "mcp_cli.commands.actions.servers.servers_action_async" - ) as mock_action: - mock_action.side_effect = Exception("Connection failed") + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock( + side_effect=Exception("Connection failed") + ) result = await command.execute() assert result.success is False - assert "Connection failed" in result.error or result.output + assert "Connection failed" in result.error or "Failed" in result.error @pytest.mark.asyncio async def test_execute_no_servers(self, command): """Test execution when no servers are connected.""" - with patch( - "mcp_cli.commands.actions.servers.servers_action_async" - ) as mock_action: - 
mock_action.return_value = {"servers": []} + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_ctx.tool_manager.get_server_info = AsyncMock(return_value=[]) result = await command.execute() @@ -143,3 +131,219 @@ def test_parameter_validation(self, command): error = command.validate_parameters(format="invalid") assert error is not None assert "Invalid choice" in error + + @pytest.mark.asyncio + async def test_execute_no_context(self, command): + """Test execution when no context is available.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_get_ctx.return_value = None + + result = await command.execute() + + assert result.success is False + assert "No tool manager available" in result.error + + @pytest.mark.asyncio + async def test_execute_no_tool_manager(self, command): + """Test execution when context has no tool manager.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = MagicMock() + mock_ctx.tool_manager = None + mock_get_ctx.return_value = mock_ctx + + result = await command.execute() + + assert result.success is False + assert "No tool manager available" in result.error + + @pytest.mark.asyncio + async def test_execute_json_format_with_servers(self, command): + """Test JSON format output with servers.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_server = create_server_info("test-server", "connected", 5) + mock_ctx.tool_manager.get_server_info = AsyncMock( + return_value=[mock_server] + ) + + with patch("chuk_term.ui.output"): + result = await command.execute(format="json") + + assert result.success is True + assert result.data is not None + assert len(result.data) == 1 + assert result.data[0]["name"] == "test-server" + assert result.data[0]["tool_count"] == 5 + + @pytest.mark.asyncio + async def test_execute_with_ping_flag(self, command): + """Test execution with ping flag.""" + with 
patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_server = create_server_info("test-server", "connected", 5) + mock_ctx.tool_manager.get_server_info = AsyncMock( + return_value=[mock_server] + ) + + with patch("chuk_term.ui.output") as mock_output: + result = await command.execute(ping=True) + + assert result.success is True + # Verify ping info was called + mock_output.info.assert_called() + + @pytest.mark.asyncio + async def test_execute_disconnected_server(self, command): + """Test execution with a disconnected server.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_server = create_server_info( + "test-server", "stopped", 0, connected=False + ) + mock_ctx.tool_manager.get_server_info = AsyncMock( + return_value=[mock_server] + ) + + with patch("chuk_term.ui.output"): + result = await command.execute() + + assert result.success is True + # Table output doesn't populate result.data, but command succeeds + + @pytest.mark.asyncio + async def test_execute_server_with_unknown_transport(self, command): + """Test execution with a server that has unknown transport.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_server = create_server_info( + "test-server", "connected", 5, transport=TransportType.UNKNOWN + ) + mock_ctx.tool_manager.get_server_info = AsyncMock( + return_value=[mock_server] + ) + + with patch("chuk_term.ui.output"): + # Test detailed view to exercise transport/namespace columns + result = await command.execute(detailed=True) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_json_server_with_unknown_transport(self, command): + """Test JSON output with a server that has unknown transport.""" + with patch("mcp_cli.context.get_context") as mock_get_ctx: + mock_ctx = mock_get_ctx.return_value + mock_server = create_server_info( + "test-server", "connected", 
5, transport=TransportType.UNKNOWN + ) + mock_ctx.tool_manager.get_server_info = AsyncMock( + return_value=[mock_server] + ) + + with patch("chuk_term.ui.output"): + result = await command.execute(format="json") + + assert result.success is True + assert result.data[0]["transport"] == "unknown" + + +class TestServersConnectionInfo: + """Test _get_connection_info method.""" + + @pytest.fixture + def command(self): + """Create a ServersCommand instance.""" + return ServersCommand() + + def test_connection_info_stdio_with_command(self, command): + """Test connection info for STDIO with command.""" + server = create_server_info("test", transport=TransportType.STDIO) + server.command = "python" + server.args = None + + result = command._get_connection_info(server) + + assert result == "python" + + def test_connection_info_stdio_with_args(self, command): + """Test connection info for STDIO with command and args.""" + server = create_server_info("test", transport=TransportType.STDIO) + server.command = "python" + server.args = ["-m", "server"] + + result = command._get_connection_info(server) + + assert result == "python -m server" + + def test_connection_info_stdio_with_many_args(self, command): + """Test connection info for STDIO with command and many args (truncated).""" + server = create_server_info("test", transport=TransportType.STDIO) + server.command = "python" + server.args = ["-m", "server", "--verbose", "--debug"] + + result = command._get_connection_info(server) + + assert result == "python -m server ..." 
+ + def test_connection_info_stdio_no_command(self, command): + """Test connection info for STDIO without command.""" + server = create_server_info("test", transport=TransportType.STDIO) + server.command = None + + result = command._get_connection_info(server) + + assert result == "stdio" + + def test_connection_info_http_with_url(self, command): + """Test connection info for HTTP with URL.""" + server = create_server_info("test", transport=TransportType.HTTP) + server.url = "http://localhost:8080" + + result = command._get_connection_info(server) + + assert result == "http://localhost:8080" + + def test_connection_info_http_with_long_url(self, command): + """Test connection info for HTTP with long URL (truncated).""" + server = create_server_info("test", transport=TransportType.HTTP) + server.url = "http://localhost:8080/very/long/path/to/api/endpoint" + + result = command._get_connection_info(server) + + assert result.endswith("...") + assert len(result) == 40 + + def test_connection_info_http_no_url(self, command): + """Test connection info for HTTP without URL.""" + server = create_server_info("test", transport=TransportType.HTTP) + server.url = None + + result = command._get_connection_info(server) + + assert result == "http" + + def test_connection_info_sse_with_url(self, command): + """Test connection info for SSE with URL.""" + server = create_server_info("test", transport=TransportType.SSE) + server.url = "http://localhost:8080/sse" + + result = command._get_connection_info(server) + + assert result == "http://localhost:8080/sse" + + def test_connection_info_sse_no_url(self, command): + """Test connection info for SSE without URL.""" + server = create_server_info("test", transport=TransportType.SSE) + server.url = None + + result = command._get_connection_info(server) + + assert result == "sse" + + def test_connection_info_unknown_transport(self, command): + """Test connection info for unknown transport.""" + server = create_server_info("test", 
transport=TransportType.UNKNOWN) + + result = command._get_connection_info(server) + + assert result == "unknown" diff --git a/tests/commands/definitions/test_theme_command.py b/tests/commands/definitions/test_theme_command.py index 46853e3e..bb70e2d6 100644 --- a/tests/commands/definitions/test_theme_command.py +++ b/tests/commands/definitions/test_theme_command.py @@ -2,8 +2,8 @@ import pytest from unittest.mock import patch, Mock -from mcp_cli.commands.definitions.theme_singular import ThemeSingularCommand -from mcp_cli.commands.definitions.themes_plural import ThemesPluralCommand +from mcp_cli.commands.theme.theme_singular import ThemeSingularCommand +from mcp_cli.commands.theme.themes_plural import ThemesPluralCommand class TestThemeSingularCommand: @@ -26,36 +26,38 @@ async def test_execute_show_current(self, command): """Test showing current theme.""" # Just test that the command executes without error # The actual theme display is handled by chuk_term which is tested separately - with patch("chuk_term.ui") as mock_ui: - # Mock the entire ui module to avoid import issues - mock_ui.output = Mock() - mock_ui.theme = Mock() - mock_ui.theme.get_theme.return_value = Mock( - name="dark", description="Dark theme" - ) - - result = await command.execute() - - # We just care that it executes successfully - assert result.success is True + with patch("chuk_term.ui.theme.get_theme") as mock_get_theme: + with patch("mcp_cli.utils.preferences.get_preference_manager") as mock_pref: + mock_theme = Mock() + mock_theme.description = "Dark theme" + mock_get_theme.return_value = mock_theme + mock_pref_mgr = Mock() + mock_pref_mgr.get_theme.return_value = "dark" + mock_pref.return_value = mock_pref_mgr + with patch("chuk_term.ui.output"): + result = await command.execute() + + # We just care that it executes successfully + assert result.success is True @pytest.mark.asyncio async def test_execute_set_theme(self, command): """Test setting a theme.""" - with 
patch("mcp_cli.commands.actions.theme.theme_action_async") as mock_action: - result = await command.execute(args=["dark"]) + with patch("chuk_term.ui.theme.set_theme") as mock_set: + with patch("mcp_cli.utils.preferences.get_preference_manager") as mock_pref: + mock_pref_mgr = Mock() + mock_pref.return_value = mock_pref_mgr + with patch("chuk_term.ui.output"): + result = await command.execute(args=["dark"]) - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert call_args.theme_name == "dark" - assert result.success is True + mock_set.assert_called_once_with("dark") + mock_pref_mgr.set_theme.assert_called_once_with("dark") + assert result.success is True @pytest.mark.asyncio async def test_execute_invalid_theme(self, command): """Test setting an invalid theme.""" - with patch("mcp_cli.commands.actions.theme.theme_action_async") as mock_action: - mock_action.side_effect = ValueError("Invalid theme") - + with patch("chuk_term.ui.output"): result = await command.execute(args=["invalid"]) # The command will catch the exception and return an error @@ -80,16 +82,11 @@ def test_command_properties(self, command): @pytest.mark.asyncio async def test_execute_list_themes(self, command): """Test listing available themes.""" - with patch("mcp_cli.commands.actions.theme.theme_action_async") as mock_action: - # Mock the theme action - mock_action.return_value = ( - None # theme_action_async doesn't return anything - ) - - result = await command.execute() - - assert result.success is True - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - # For themes plural, it shows list (no specific theme_name) - assert call_args.theme_name is None or call_args.theme_name == "" + with patch("mcp_cli.utils.preferences.get_preference_manager") as mock_pref: + mock_pref_mgr = Mock() + mock_pref_mgr.get_theme.return_value = "dark" + mock_pref.return_value = mock_pref_mgr + with patch("chuk_term.ui.output"): + result = await command.execute() 
+ + assert result.success is True diff --git a/tests/commands/definitions/test_theme_command_extended.py b/tests/commands/definitions/test_theme_command_extended.py index 7718423f..38dfaaea 100644 --- a/tests/commands/definitions/test_theme_command_extended.py +++ b/tests/commands/definitions/test_theme_command_extended.py @@ -3,7 +3,7 @@ import pytest from unittest.mock import patch, MagicMock -from mcp_cli.commands.definitions.theme import ThemeCommand +from mcp_cli.commands.theme.theme import ThemeCommand from mcp_cli.commands.base import CommandMode @@ -41,7 +41,7 @@ async def test_theme_set_valid_theme(theme_command, mock_pref_manager): "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_pref_manager, ): - with patch("mcp_cli.commands.definitions.theme.set_theme") as mock_set_theme: + with patch("mcp_cli.commands.theme.theme.set_theme") as mock_set_theme: result = await theme_command.execute(theme_name="dark") assert result.success is True @@ -71,7 +71,7 @@ async def test_theme_from_args_list(theme_command, mock_pref_manager): "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_pref_manager, ): - with patch("mcp_cli.commands.definitions.theme.set_theme") as mock_set_theme: + with patch("mcp_cli.commands.theme.theme.set_theme") as mock_set_theme: result = await theme_command.execute(args=["monokai"]) assert result.success is True @@ -86,7 +86,7 @@ async def test_theme_from_args_string(theme_command, mock_pref_manager): "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_pref_manager, ): - with patch("mcp_cli.commands.definitions.theme.set_theme") as mock_set_theme: + with patch("mcp_cli.commands.theme.theme.set_theme") as mock_set_theme: result = await theme_command.execute(args="dracula") assert result.success is True @@ -96,39 +96,30 @@ async def test_theme_from_args_string(theme_command, mock_pref_manager): @pytest.mark.asyncio async def test_theme_interactive_selection(theme_command, mock_pref_manager): 
- """Test interactive theme selection.""" + """Test showing current theme when no theme is provided.""" with patch( "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_pref_manager, ): - with patch( - "mcp_cli.commands.actions.theme._interactive_theme_selection" - ) as mock_interactive: - mock_interactive.return_value = None - + with patch("chuk_term.ui.output"): result = await theme_command.execute() assert result.success is True - mock_interactive.assert_called_once_with(mock_pref_manager) + assert "Current theme: default" in result.output @pytest.mark.asyncio async def test_theme_interactive_selection_error(theme_command, mock_pref_manager): - """Test fallback when interactive selection fails.""" + """Test showing theme info when no theme is provided.""" with patch( "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_pref_manager, ): - with patch( - "mcp_cli.commands.actions.theme._interactive_theme_selection" - ) as mock_interactive: - mock_interactive.side_effect = Exception("Interactive failed") - + with patch("chuk_term.ui.output"): result = await theme_command.execute() assert result.success is True assert "Current theme: default" in result.output - assert "Available themes:" in result.output @pytest.mark.asyncio @@ -149,7 +140,7 @@ async def test_theme_all_valid_themes(theme_command, mock_pref_manager): "mcp_cli.utils.preferences.get_preference_manager", return_value=mock_pref_manager, ): - with patch("mcp_cli.commands.definitions.theme.set_theme") as mock_set_theme: + with patch("mcp_cli.commands.theme.theme.set_theme") as mock_set_theme: for theme in valid_themes: result = await theme_command.execute(theme_name=theme) @@ -160,20 +151,16 @@ async def test_theme_all_valid_themes(theme_command, mock_pref_manager): @pytest.mark.asyncio async def test_theme_empty_args_list(theme_command, mock_pref_manager): - """Test with empty args list.""" + """Test with empty args list shows current theme.""" with patch( 
"mcp_cli.utils.preferences.get_preference_manager", return_value=mock_pref_manager, ): - with patch( - "mcp_cli.commands.actions.theme._interactive_theme_selection" - ) as mock_interactive: - mock_interactive.return_value = None - + with patch("chuk_term.ui.output"): result = await theme_command.execute(args=[]) assert result.success is True - mock_interactive.assert_called_once() + assert "Current theme: default" in result.output @pytest.mark.asyncio diff --git a/tests/commands/definitions/test_themes_plural_command.py b/tests/commands/definitions/test_themes_plural_command.py index 3925147e..9f8c7ffe 100644 --- a/tests/commands/definitions/test_themes_plural_command.py +++ b/tests/commands/definitions/test_themes_plural_command.py @@ -1,8 +1,8 @@ """Tests for the themes plural command.""" import pytest -from unittest.mock import patch, AsyncMock -from mcp_cli.commands.definitions.themes_plural import ThemesPluralCommand +from unittest.mock import patch, Mock +from mcp_cli.commands.theme.themes_plural import ThemesPluralCommand class TestThemesPluralCommand: @@ -23,42 +23,41 @@ def test_command_properties(self, command): @pytest.mark.asyncio async def test_execute_success(self, command): """Test successful execution.""" - with patch( - "mcp_cli.commands.actions.theme.theme_action_async", new_callable=AsyncMock - ) as mock_action: - result = await command.execute() + with patch("mcp_cli.utils.preferences.get_preference_manager") as mock_pref: + mock_pref_mgr = Mock() + mock_pref_mgr.get_theme.return_value = "dark" + mock_pref.return_value = mock_pref_mgr + with patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table"): + result = await command.execute() - # Should call theme action with empty params - mock_action.assert_called_once() - call_args = mock_action.call_args[0][0] - assert hasattr(call_args.__class__, "model_fields") # It's a Pydantic model - assert result.success is True + assert result.success is True @pytest.mark.asyncio async def 
test_execute_with_kwargs(self, command): """Test execution with kwargs (should be ignored).""" - with patch( - "mcp_cli.commands.actions.theme.theme_action_async", new_callable=AsyncMock - ) as mock_action: - result = await command.execute(some_arg="value") + with patch("mcp_cli.utils.preferences.get_preference_manager") as mock_pref: + mock_pref_mgr = Mock() + mock_pref_mgr.get_theme.return_value = "dark" + mock_pref.return_value = mock_pref_mgr + with patch("chuk_term.ui.output"): + with patch("chuk_term.ui.format_table"): + result = await command.execute(some_arg="value") - # Should still call theme action successfully - mock_action.assert_called_once() - assert result.success is True + # Should still call successfully + assert result.success is True @pytest.mark.asyncio async def test_execute_error_handling(self, command): """Test error handling during execution.""" - with patch( - "mcp_cli.commands.actions.theme.theme_action_async", new_callable=AsyncMock - ) as mock_action: - mock_action.side_effect = Exception("Theme list failed") + with patch("mcp_cli.utils.preferences.get_preference_manager") as mock_pref: + mock_pref.side_effect = Exception("Theme list failed") + with patch("chuk_term.ui.output"): + result = await command.execute() - result = await command.execute() - - assert result.success is False - assert "Failed to list themes" in result.error - assert "Theme list failed" in result.error + assert result.success is False + assert "Failed to list themes" in result.error + assert "Theme list failed" in result.error def test_help_text_content(self, command): """Test that help text contains expected information.""" diff --git a/tests/commands/definitions/test_token_command.py b/tests/commands/definitions/test_token_command.py index 9c58b30b..2832c5ef 100644 --- a/tests/commands/definitions/test_token_command.py +++ b/tests/commands/definitions/test_token_command.py @@ -1,8 +1,8 @@ """Tests for the token command.""" import pytest -from unittest.mock import 
patch, AsyncMock -from mcp_cli.commands.definitions.token import TokenCommand +from unittest.mock import patch, Mock +from mcp_cli.commands.tokens.token import TokenCommand from mcp_cli.commands.base import CommandMode @@ -22,160 +22,177 @@ def test_command_properties(self, command): assert "Manage OAuth and authentication tokens" in command.help_text assert "set" in command.help_text # Should include set command assert "get" in command.help_text # Should include get command - assert command.modes == (CommandMode.CHAT | CommandMode.INTERACTIVE) + assert command.modes == ( + CommandMode.CLI | CommandMode.CHAT | CommandMode.INTERACTIVE + ) assert command.requires_context # Needs context to get server list @pytest.mark.asyncio async def test_execute_no_args(self, command): """Test execution with no arguments - should list tokens.""" - with patch( - "mcp_cli.commands.actions.token.token_list_action_async", - new_callable=AsyncMock, - ) as mock_list: - result = await command.execute() + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.registry.list_tokens.return_value = [] + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + result = await command.execute(tool_manager=Mock(servers=[])) - # Should call list action - mock_list.assert_called_once() - call_args = mock_list.call_args[0][0] - assert hasattr(call_args.__class__, "model_fields") # It's a Pydantic model - assert result.success is True + assert result.success is True @pytest.mark.asyncio async def test_execute_list_subcommand(self, command): """Test execution with 'list' subcommand.""" - with patch( - "mcp_cli.commands.actions.token.token_list_action_async", - new_callable=AsyncMock, - ) as mock_list: - result = await command.execute(args=["list"]) + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() 
+ mock_manager.registry.list_tokens.return_value = [] + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + result = await command.execute( + args=["list"], tool_manager=Mock(servers=[]) + ) - # Should call list action - mock_list.assert_called_once() - assert result.success is True + # Should call list action + assert result.success is True @pytest.mark.asyncio async def test_execute_clear_without_force(self, command): """Test execution with 'clear' subcommand without force flag.""" - with patch( - "mcp_cli.commands.actions.token.token_clear_action_async", - new_callable=AsyncMock, - ) as mock_clear: - result = await command.execute(args=["clear"]) + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.registry.list_tokens.return_value = [] + mock_mgr.return_value = mock_manager + with patch("chuk_term.ui.prompts.confirm") as mock_confirm: + mock_confirm.return_value = False # User cancels + with patch("chuk_term.ui.output"): + result = await command.execute( + args=["clear"], tool_manager=Mock(servers=[]) + ) - # Should call clear action with force=False - mock_clear.assert_called_once() - call_args = mock_clear.call_args[0][0] - assert not call_args.force - assert result.success is True + # Should be cancelled + assert result.success is False @pytest.mark.asyncio async def test_execute_clear_with_force_long(self, command): """Test execution with 'clear' subcommand with --force flag.""" - with patch( - "mcp_cli.commands.actions.token.token_clear_action_async", - new_callable=AsyncMock, - ) as mock_clear: - result = await command.execute(args=["clear", "--force"]) + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.registry.list_tokens.return_value = [] + mock_manager.token_store = Mock() + mock_mgr.return_value = mock_manager + with 
patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["clear", "--force"], tool_manager=Mock(servers=[]) + ) - # Should call clear action with force=True - mock_clear.assert_called_once() - call_args = mock_clear.call_args[0][0] - assert call_args.force - assert result.success is True + # Should call clear with force=True + assert result.success is True @pytest.mark.asyncio async def test_execute_clear_with_force_short(self, command): """Test execution with 'clear' subcommand with -f flag.""" - with patch( - "mcp_cli.commands.actions.token.token_clear_action_async", - new_callable=AsyncMock, - ) as mock_clear: - result = await command.execute(args=["clear", "-f"]) + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.registry.list_tokens.return_value = [] + mock_manager.token_store = Mock() + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["clear", "-f"], tool_manager=Mock(servers=[]) + ) - # Should call clear action with force=True - mock_clear.assert_called_once() - call_args = mock_clear.call_args[0][0] - assert call_args.force - assert result.success is True + # Should call clear with force=True + assert result.success is True @pytest.mark.asyncio async def test_execute_delete_with_name(self, command): """Test execution with 'delete' subcommand and token name.""" - with patch( - "mcp_cli.commands.actions.token.token_delete_action_async", - new_callable=AsyncMock, - ) as mock_delete: - result = await command.execute(args=["delete", "github"]) + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.token_store = Mock() + mock_manager.token_store.delete_generic.return_value = True + mock_manager.registry = Mock() + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await 
command.execute( + args=["delete", "github"], tool_manager=Mock(servers=[]) + ) - # Should call delete action with the token name - mock_delete.assert_called_once() - call_args = mock_delete.call_args[0][0] - assert call_args.name == "github" - assert call_args.oauth is True - assert result.success is True + # Should call delete action with the token name + assert result.success is True @pytest.mark.asyncio async def test_execute_delete_without_name(self, command): """Test execution with 'delete' subcommand but no token name.""" - with patch("chuk_term.ui.output") as mock_output: - result = await command.execute(args=["delete"]) + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["delete"], tool_manager=Mock(servers=[]) + ) # Should show error - mock_output.error.assert_called_with( - "Token name required for delete command" - ) - mock_output.hint.assert_called() assert result.success is False + assert "Token name is required" in result.error @pytest.mark.asyncio async def test_execute_unknown_subcommand(self, command): """Test execution with unknown subcommand.""" - with patch("chuk_term.ui.output") as mock_output: - result = await command.execute(args=["unknown"]) + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["unknown"], tool_manager=Mock(servers=[]) + ) # Should show error - mock_output.error.assert_called_with("Unknown token subcommand: unknown") - assert mock_output.hint.call_count == 2 # Two hints assert result.success is False + assert "Unknown token action" in result.error @pytest.mark.asyncio async def test_execute_string_arg(self, command): """Test execution with string argument instead of list.""" - with patch( - "mcp_cli.commands.actions.token.token_list_action_async", - new_callable=AsyncMock, - ) as mock_list: - # Pass a string arg (should be converted to list) - result = await command.execute(args="list") + with 
patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.registry.list_tokens.return_value = [] + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + # Pass a string arg (should be converted to list) + result = await command.execute( + args="list", tool_manager=Mock(servers=[]) + ) - mock_list.assert_called_once() - assert result.success is True + assert result.success is True @pytest.mark.asyncio async def test_execute_empty_list(self, command): """Test execution with empty list of arguments.""" - with patch( - "mcp_cli.commands.actions.token.token_list_action_async", - new_callable=AsyncMock, - ) as mock_list: - result = await command.execute(args=[]) + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.registry.list_tokens.return_value = [] + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + result = await command.execute( + args=[], tool_manager=Mock(servers=[]) + ) - # Should call list action (default) - mock_list.assert_called_once() - assert result.success is True + # Should call list action (default) + assert result.success is True @pytest.mark.asyncio async def test_execute_case_insensitive_subcommand(self, command): """Test that subcommands are case-insensitive.""" - with patch( - "mcp_cli.commands.actions.token.token_list_action_async", - new_callable=AsyncMock, - ) as mock_list: - result = await command.execute(args=["LIST"]) + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.registry.list_tokens.return_value = [] + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + result 
= await command.execute( + args=["LIST"], tool_manager=Mock(servers=[]) + ) - # Should call list action (subcommand converted to lowercase) - mock_list.assert_called_once() - assert result.success is True + # Should call list action (subcommand converted to lowercase) + assert result.success is True @pytest.mark.asyncio async def test_help_text_content(self, command): @@ -191,87 +208,1101 @@ async def test_help_text_content(self, command): def test_modes_flags(self, command): """Test that command modes are correctly set.""" - # Should be available in CHAT and INTERACTIVE but not CLI + # Should be available in all modes assert CommandMode.CHAT in command.modes assert CommandMode.INTERACTIVE in command.modes - assert CommandMode.CLI not in command.modes + assert CommandMode.CLI in command.modes @pytest.mark.asyncio async def test_execute_set_with_name_and_value(self, command): """Test execution with 'set' subcommand and token name/value.""" - with patch( - "mcp_cli.commands.actions.token.token_set_action_async", - new_callable=AsyncMock, - ) as mock_set: - result = await command.execute(args=["set", "my-api", "secret-token"]) - - # Should call set action with the token name and value - mock_set.assert_called_once() - call_args = mock_set.call_args[0][0] - assert call_args.name == "my-api" - assert call_args.value == "secret-token" - assert call_args.token_type == "bearer" - assert call_args.namespace == "generic" - assert result.success is True + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.token_store = Mock() + mock_manager.registry = Mock() + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["set", "my-api", "secret-token"], + tool_manager=Mock(servers=[]), + ) + + # Should call set action with the token name and value + assert result.success is True @pytest.mark.asyncio async def 
test_execute_set_without_value(self, command): """Test execution with 'set' subcommand but no token value.""" - with patch("chuk_term.ui.output") as mock_output: - result = await command.execute(args=["set", "my-api"]) + with patch("chuk_term.ui.output"): + with patch("getpass.getpass") as mock_getpass: + mock_getpass.return_value = "" # Empty value + # Pass name via kwargs since we're testing the case where value is prompted + result = await command.execute( + args=["set"], name="my-api", tool_manager=Mock(servers=[]) + ) - # Should show error - mock_output.error.assert_called_with( - "Token name and value required for set command" - ) - mock_output.hint.assert_called() - assert result.success is False + # Should show error when getpass returns empty + assert result.success is False + assert "Token value is required" in result.error @pytest.mark.asyncio async def test_execute_set_without_name_or_value(self, command): """Test execution with 'set' subcommand but no arguments.""" - with patch("chuk_term.ui.output") as mock_output: - result = await command.execute(args=["set"]) + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute(args=["set"], tool_manager=Mock(servers=[])) # Should show error - mock_output.error.assert_called_with( - "Token name and value required for set command" - ) - mock_output.hint.assert_called() assert result.success is False + assert "Token name is required" in result.error @pytest.mark.asyncio async def test_execute_get_with_name(self, command): """Test execution with 'get' subcommand and token name.""" - from mcp_cli.constants import OAUTH_NAMESPACE, GENERIC_NAMESPACE - - with patch( - "mcp_cli.commands.actions.token.token_get_action_async", - new_callable=AsyncMock, - ) as mock_get: - result = await command.execute(args=["get", "notion"]) - - # Should call get action twice (OAuth namespace then generic) - assert mock_get.call_count == 2 - # First call should be for OAuth namespace (mcp-cli) - 
first_call_name = mock_get.call_args_list[0][0][0] - first_call_kwargs = mock_get.call_args_list[0][1] - assert first_call_name == "notion" - assert first_call_kwargs["namespace"] == OAUTH_NAMESPACE - # Second call should be for generic namespace - second_call_name = mock_get.call_args_list[1][0][0] - second_call_kwargs = mock_get.call_args_list[1][1] - assert second_call_name == "notion" - assert second_call_kwargs["namespace"] == GENERIC_NAMESPACE - assert result.success is True + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.token_store = Mock() + mock_manager.token_store._retrieve_raw.return_value = None + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["get", "notion"], tool_manager=Mock(servers=[]) + ) + + # Should call get action + assert result.success is False # Token not found @pytest.mark.asyncio async def test_execute_get_without_name(self, command): """Test execution with 'get' subcommand but no token name.""" - with patch("chuk_term.ui.output") as mock_output: - result = await command.execute(args=["get"]) + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute(args=["get"], tool_manager=Mock(servers=[])) # Should show error - mock_output.error.assert_called_with("Token name required for get command") - mock_output.hint.assert_called() assert result.success is False + assert "Token name is required" in result.error + + @pytest.mark.asyncio + async def test_execute_backends(self, command): + """Test execution with 'backends' subcommand.""" + with patch("mcp_cli.commands.tokens.token.TokenStoreFactory") as mock_factory: + from mcp_cli.auth import TokenStoreBackend + + mock_factory.get_available_backends.return_value = [ + TokenStoreBackend.ENCRYPTED_FILE + ] + mock_factory._detect_backend.return_value = TokenStoreBackend.ENCRYPTED_FILE + with 
patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + result = await command.execute( + args=["backends"], tool_manager=Mock(servers=[]) + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_set_provider(self, command): + """Test execution with 'set-provider' subcommand.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_mgr.return_value = mock_manager + with patch( + "mcp_cli.auth.provider_tokens.set_provider_token" + ) as mock_set_provider: + mock_set_provider.return_value = True + with patch( + "mcp_cli.auth.provider_tokens.get_provider_env_var_name" + ) as mock_env: + mock_env.return_value = "ANTHROPIC_API_KEY" + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["set-provider"], + provider="anthropic", + api_key="test-key", + tool_manager=Mock(servers=[]), + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_set_provider_no_provider(self, command): + """Test execution with 'set-provider' but no provider name.""" + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["set-provider"], tool_manager=Mock(servers=[]) + ) + + assert result.success is False + assert "Provider name is required" in result.error + + @pytest.mark.asyncio + async def test_execute_get_provider(self, command): + """Test execution with 'get-provider' subcommand.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_mgr.return_value = mock_manager + with patch( + "mcp_cli.auth.provider_tokens.check_provider_token_status" + ) as mock_status: + mock_status.return_value = { + "has_token": True, + "source": "storage", + "env_var": "ANTHROPIC_API_KEY", + "in_env": False, + "in_storage": True, + } + with patch("mcp_cli.commands.tokens.token.output"): + result = await 
command.execute( + args=["get-provider"], + provider="anthropic", + tool_manager=Mock(servers=[]), + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_get_provider_no_token(self, command): + """Test execution with 'get-provider' but no token found.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_mgr.return_value = mock_manager + with patch( + "mcp_cli.auth.provider_tokens.check_provider_token_status" + ) as mock_status: + mock_status.return_value = { + "has_token": False, + "source": None, + "env_var": "ANTHROPIC_API_KEY", + "in_env": False, + "in_storage": False, + } + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["get-provider"], + provider="anthropic", + tool_manager=Mock(servers=[]), + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_get_provider_no_provider(self, command): + """Test execution with 'get-provider' but no provider name.""" + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["get-provider"], tool_manager=Mock(servers=[]) + ) + + assert result.success is False + assert "Provider name is required" in result.error + + @pytest.mark.asyncio + async def test_execute_delete_provider(self, command): + """Test execution with 'delete-provider' subcommand.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_mgr.return_value = mock_manager + with patch( + "mcp_cli.auth.provider_tokens.delete_provider_token" + ) as mock_delete: + mock_delete.return_value = True + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["delete-provider"], + provider="anthropic", + tool_manager=Mock(servers=[]), + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_delete_provider_not_found(self, command): + """Test 
execution with 'delete-provider' but token not found.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_mgr.return_value = mock_manager + with patch( + "mcp_cli.auth.provider_tokens.delete_provider_token" + ) as mock_delete: + mock_delete.return_value = False + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["delete-provider"], + provider="anthropic", + tool_manager=Mock(servers=[]), + ) + + assert result.success is False + + @pytest.mark.asyncio + async def test_execute_delete_provider_no_provider(self, command): + """Test execution with 'delete-provider' but no provider name.""" + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["delete-provider"], tool_manager=Mock(servers=[]) + ) + + assert result.success is False + assert "Provider name is required" in result.error + + @pytest.mark.asyncio + async def test_execute_set_api_key(self, command): + """Test execution with 'set' subcommand with api-key type.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.token_store = Mock() + mock_manager.registry = Mock() + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["set", "my-api", "secret-token"], + token_type="api-key", + provider="openai", + tool_manager=Mock(servers=[]), + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_set_api_key_no_provider(self, command): + """Test execution with 'set' api-key type but no provider.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.token_store = Mock() + mock_manager.registry = Mock() + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["set", 
"my-api", "secret-token"], + token_type="api-key", + tool_manager=Mock(servers=[]), + ) + + assert result.success is False + assert "Provider name is required" in result.error + + @pytest.mark.asyncio + async def test_execute_set_generic(self, command): + """Test execution with 'set' subcommand with generic type.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.token_store = Mock() + mock_manager.registry = Mock() + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["set", "my-token", "secret-value"], + token_type="generic", + tool_manager=Mock(servers=[]), + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_set_unknown_type(self, command): + """Test execution with 'set' subcommand with unknown type.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.token_store = Mock() + mock_manager.registry = Mock() + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["set", "my-token", "secret-value"], + token_type="invalid-type", + tool_manager=Mock(servers=[]), + ) + + assert result.success is False + assert "Unknown token type" in result.error + + @pytest.mark.asyncio + async def test_execute_get_found_in_oauth(self, command): + """Test execution with 'get' finding token in OAuth namespace.""" + import json + from mcp_cli.auth import TokenType + + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_store = Mock() + # First call returns None (generic namespace), second returns token (OAuth) + mock_store._retrieve_raw.side_effect = [ + None, + json.dumps( + { + "token_type": TokenType.BEARER.value, + "token": "test-token", + "created_at": 1234567890, + } + ), + ] + 
mock_manager.token_store = mock_store + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["get", "notion"], tool_manager=Mock(servers=[]) + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_delete_oauth(self, command): + """Test execution with 'delete' subcommand for OAuth token.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.delete_tokens.return_value = True + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["delete", "notion"], + is_oauth=True, + tool_manager=Mock(servers=[]), + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_delete_oauth_not_found(self, command): + """Test execution with 'delete' OAuth token not found.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.delete_tokens.return_value = False + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["delete", "notion"], + is_oauth=True, + tool_manager=Mock(servers=[]), + ) + + # Should return True even when not found (just warning) + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_delete_with_namespace(self, command): + """Test execution with 'delete' and specific namespace.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.token_store = Mock() + mock_manager.token_store.delete_generic.return_value = True + mock_manager.registry = Mock() + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["delete", "my-token"], + namespace="bearer", + tool_manager=Mock(servers=[]), + ) 
+ + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_delete_not_found(self, command): + """Test execution with 'delete' token not found.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.token_store = Mock() + mock_manager.token_store.delete_generic.return_value = False + mock_manager.registry = Mock() + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["delete", "my-token"], + tool_manager=Mock(servers=[]), + ) + + assert result.success is False + + @pytest.mark.asyncio + async def test_execute_clear_with_tokens(self, command): + """Test execution with 'clear' subcommand when tokens exist.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.registry.list_tokens.return_value = [ + {"name": "token1", "namespace": "generic"} + ] + mock_manager.token_store.delete_generic.return_value = True + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["clear", "--force"], + tool_manager=Mock(servers=[]), + ) + + assert result.success is True + mock_manager.registry.clear_all.assert_called_once() + + @pytest.mark.asyncio + async def test_execute_clear_with_namespace(self, command): + """Test execution with 'clear' subcommand with specific namespace.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.registry.list_tokens.return_value = [ + {"name": "token1", "namespace": "bearer"} + ] + mock_manager.token_store.delete_generic.return_value = True + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["clear", "--force"], + namespace="bearer", + tool_manager=Mock(servers=[]), + ) + + assert result.success 
is True + mock_manager.registry.clear_namespace.assert_called_once_with("bearer") + + @pytest.mark.asyncio + async def test_execute_list_with_oauth_tokens(self, command): + """Test execution with 'list' showing OAuth tokens.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.registry.list_tokens.return_value = [] + # Mock load_tokens to return OAuth tokens + mock_tokens = Mock() + mock_tokens.expires_in = 3600 + mock_tokens.issued_at = 1234567890 + mock_manager.load_tokens.return_value = mock_tokens + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + result = await command.execute( + tool_manager=Mock(servers=["notion-server"]) + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_list_with_provider_tokens(self, command): + """Test execution with 'list' showing provider tokens.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.registry.list_tokens.return_value = [] + mock_manager.load_tokens.return_value = None + mock_mgr.return_value = mock_manager + with patch( + "mcp_cli.auth.provider_tokens.list_all_provider_tokens" + ) as mock_list: + mock_list.return_value = { + "anthropic": { + "env_var": "ANTHROPIC_API_KEY", + "in_env": False, + } + } + with patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + result = await command.execute(tool_manager=Mock(servers=[])) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_list_with_provider_tokens_env_override(self, command): + """Test execution with 'list' showing provider tokens with env override.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.registry.list_tokens.return_value = [] + 
mock_manager.load_tokens.return_value = None + mock_mgr.return_value = mock_manager + with patch( + "mcp_cli.auth.provider_tokens.list_all_provider_tokens" + ) as mock_list: + mock_list.return_value = { + "anthropic": { + "env_var": "ANTHROPIC_API_KEY", + "in_env": True, + } + } + with patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + result = await command.execute(tool_manager=Mock(servers=[])) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_list_error(self, command): + """Test execution with 'list' when error occurs.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_mgr.side_effect = Exception("Token manager error") + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute(tool_manager=Mock(servers=[])) + + assert result.success is False + assert "Error listing tokens" in result.error + + @pytest.mark.asyncio + async def test_execute_set_error(self, command): + """Test execution with 'set' when error occurs.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_mgr.side_effect = Exception("Store error") + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["set", "token", "value"], + tool_manager=Mock(servers=[]), + ) + + assert result.success is False + assert "Error storing token" in result.error + + @pytest.mark.asyncio + async def test_execute_get_error(self, command): + """Test execution with 'get' when error occurs.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_mgr.side_effect = Exception("Retrieve error") + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["get", "token"], + tool_manager=Mock(servers=[]), + ) + + assert result.success is False + assert "Error retrieving token" in result.error + + @pytest.mark.asyncio + async def 
test_execute_delete_error(self, command): + """Test execution with 'delete' when error occurs.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_mgr.side_effect = Exception("Delete error") + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["delete", "token"], + tool_manager=Mock(servers=[]), + ) + + assert result.success is False + assert "Error deleting token" in result.error + + @pytest.mark.asyncio + async def test_execute_clear_error(self, command): + """Test execution with 'clear' when error occurs.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_mgr.side_effect = Exception("Clear error") + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["clear", "--force"], + tool_manager=Mock(servers=[]), + ) + + assert result.success is False + assert "Error clearing tokens" in result.error + + @pytest.mark.asyncio + async def test_execute_backends_error(self, command): + """Test execution with 'backends' when error occurs.""" + with patch("mcp_cli.commands.tokens.token.TokenStoreFactory") as mock_factory: + mock_factory.get_available_backends.side_effect = Exception("Backend error") + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["backends"], tool_manager=Mock(servers=[]) + ) + + assert result.success is False + assert "Error listing backends" in result.error + + @pytest.mark.asyncio + async def test_execute_set_provider_error(self, command): + """Test execution with 'set-provider' when error occurs.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_mgr.side_effect = Exception("Provider error") + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["set-provider"], + provider="anthropic", + api_key="key", + tool_manager=Mock(servers=[]), + ) + + assert result.success is False + 
assert "Error storing provider token" in result.error + + @pytest.mark.asyncio + async def test_execute_get_provider_error(self, command): + """Test execution with 'get-provider' when error occurs.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_mgr.side_effect = Exception("Provider error") + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["get-provider"], + provider="anthropic", + tool_manager=Mock(servers=[]), + ) + + assert result.success is False + assert "Error retrieving provider token info" in result.error + + @pytest.mark.asyncio + async def test_execute_delete_provider_error(self, command): + """Test execution with 'delete-provider' when error occurs.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_mgr.side_effect = Exception("Provider error") + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["delete-provider"], + provider="anthropic", + tool_manager=Mock(servers=[]), + ) + + assert result.success is False + assert "Error deleting provider token" in result.error + + @pytest.mark.asyncio + async def test_execute_exception_wrapper(self, command): + """Test that general exceptions are caught by execute wrapper.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_mgr.return_value = mock_manager + with patch.object( + command, "_action_list", side_effect=Exception("Unexpected error") + ): + result = await command.execute(tool_manager=Mock(servers=[])) + + assert result.success is False + assert "Token command error" in result.error + + @pytest.mark.asyncio + async def test_execute_list_with_registered_tokens(self, command): + """Test execution with 'list' showing registered tokens.""" + import time + from mcp_cli.auth import TokenType + + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = 
Mock() + # Return tokens in registry + mock_manager.registry.list_tokens.return_value = [ + { + "name": "my-token", + "type": TokenType.BEARER.value, + "namespace": "generic", + "registered_at": time.time(), + "metadata": {"expires_at": time.time() + 3600}, + } + ] + mock_manager.load_tokens.return_value = None + mock_mgr.return_value = mock_manager + with patch( + "mcp_cli.auth.provider_tokens.list_all_provider_tokens" + ) as mock_list: + mock_list.return_value = {} + with patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + result = await command.execute(tool_manager=Mock(servers=[])) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_list_with_expired_token(self, command): + """Test execution with 'list' showing expired token.""" + import time + from mcp_cli.auth import TokenType + + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + # Return expired token + mock_manager.registry.list_tokens.return_value = [ + { + "name": "expired-token", + "type": TokenType.BEARER.value, + "namespace": "generic", + "registered_at": time.time() - 7200, + "metadata": {"expires_at": time.time() - 3600}, # Expired + } + ] + mock_manager.load_tokens.return_value = None + mock_mgr.return_value = mock_manager + with patch( + "mcp_cli.auth.provider_tokens.list_all_provider_tokens" + ) as mock_list: + mock_list.return_value = {} + with patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + result = await command.execute(tool_manager=Mock(servers=[])) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_list_with_api_key_token(self, command): + """Test execution with 'list' showing API key token.""" + import time + from mcp_cli.auth import TokenType + + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + 
mock_manager.registry.list_tokens.return_value = [ + { + "name": "api-key", + "type": TokenType.API_KEY.value, + "namespace": "api-key", + "registered_at": time.time(), + "metadata": {"provider": "openai"}, + } + ] + mock_manager.load_tokens.return_value = None + mock_mgr.return_value = mock_manager + with patch( + "mcp_cli.auth.provider_tokens.list_all_provider_tokens" + ) as mock_list: + mock_list.return_value = {} + with patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + result = await command.execute(tool_manager=Mock(servers=[])) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_list_oauth_with_no_issued_at(self, command): + """Test execution with 'list' showing OAuth token without issued_at.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.registry.list_tokens.return_value = [] + mock_tokens = Mock() + mock_tokens.expires_in = 3600 + mock_tokens.issued_at = None # No issued_at + mock_manager.load_tokens.return_value = mock_tokens + mock_mgr.return_value = mock_manager + with patch( + "mcp_cli.auth.provider_tokens.list_all_provider_tokens" + ) as mock_list: + mock_list.return_value = {} + with patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + result = await command.execute( + tool_manager=Mock(servers=["notion"]) + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_list_oauth_expired(self, command): + """Test execution with 'list' showing expired OAuth token.""" + import time + + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.registry.list_tokens.return_value = [] + mock_tokens = Mock() + mock_tokens.expires_in = 3600 + mock_tokens.issued_at = ( + time.time() - 7200 + ) # Issued 2 hours ago, expires in 1 hour + 
mock_manager.load_tokens.return_value = mock_tokens + mock_mgr.return_value = mock_manager + with patch( + "mcp_cli.auth.provider_tokens.list_all_provider_tokens" + ) as mock_list: + mock_list.return_value = {} + with patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + result = await command.execute( + tool_manager=Mock(servers=["notion"]) + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_get_with_display_info(self, command): + """Test execution with 'get' showing full token info.""" + import json + + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_store = Mock() + mock_store._retrieve_raw.return_value = json.dumps( + { + "token_type": "bearer", + "token": "test-token", + "created_at": 1234567890, + } + ) + mock_manager.token_store = mock_store + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["get", "my-token"], tool_manager=Mock(servers=[]) + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_get_parse_error(self, command): + """Test execution with 'get' when token data parsing fails.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_store = Mock() + mock_store._retrieve_raw.return_value = "invalid-json" + mock_manager.token_store = mock_store + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["get", "my-token"], tool_manager=Mock(servers=[]) + ) + + # Should still return True but with warning + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_clear_confirm_with_namespace(self, command): + """Test execution with 'clear' confirming with namespace.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as 
mock_mgr: + mock_manager = Mock() + mock_manager.registry.list_tokens.return_value = [ + {"name": "token1", "namespace": "bearer"} + ] + mock_manager.token_store.delete_generic.return_value = True + mock_mgr.return_value = mock_manager + with patch("chuk_term.ui.prompts.confirm") as mock_confirm: + mock_confirm.return_value = True # User confirms + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["clear"], + namespace="bearer", + tool_manager=Mock(servers=[]), + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_set_bearer_with_expires(self, command): + """Test execution with 'set' bearer token that has expiration.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.token_store = Mock() + mock_manager.registry = Mock() + mock_mgr.return_value = mock_manager + with patch("mcp_cli.commands.tokens.token.BearerToken") as mock_bearer: + mock_bearer_instance = Mock() + mock_bearer_instance.expires_at = 1234567890 + mock_bearer_instance.to_stored_token.return_value = Mock( + metadata={}, model_dump=Mock(return_value={}) + ) + mock_bearer.return_value = mock_bearer_instance + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["set", "my-token", "secret"], + token_type="bearer", + tool_manager=Mock(servers=[]), + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_set_provider_with_prompt(self, command): + """Test execution with 'set-provider' prompting for api key.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_mgr.return_value = mock_manager + with patch("getpass.getpass") as mock_getpass: + mock_getpass.return_value = "prompted-key" + with patch( + "mcp_cli.auth.provider_tokens.set_provider_token" + ) as mock_set: + mock_set.return_value = True + with patch( + 
"mcp_cli.auth.provider_tokens.get_provider_env_var_name" + ) as mock_env: + mock_env.return_value = "ANTHROPIC_API_KEY" + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["set-provider"], + provider="anthropic", + tool_manager=Mock(servers=[]), + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_set_provider_empty_key(self, command): + """Test execution with 'set-provider' with empty key.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_mgr.return_value = mock_manager + with patch("getpass.getpass") as mock_getpass: + mock_getpass.return_value = "" # Empty + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["set-provider"], + provider="anthropic", + tool_manager=Mock(servers=[]), + ) + + assert result.success is False + assert "API key is required" in result.error + + @pytest.mark.asyncio + async def test_execute_set_provider_with_env_var_override(self, command): + """Test execution with 'set-provider' when env var is also set.""" + import os + + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_mgr.return_value = mock_manager + with patch("mcp_cli.auth.provider_tokens.set_provider_token") as mock_set: + mock_set.return_value = True + with patch( + "mcp_cli.auth.provider_tokens.get_provider_env_var_name" + ) as mock_env: + mock_env.return_value = "ANTHROPIC_API_KEY" + with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "existing-key"}): + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["set-provider"], + provider="anthropic", + api_key="new-key", + tool_manager=Mock(servers=[]), + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_set_provider_failure(self, command): + """Test execution with 'set-provider' when storage fails.""" + with 
patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_mgr.return_value = mock_manager + with patch("mcp_cli.auth.provider_tokens.set_provider_token") as mock_set: + mock_set.return_value = False # Failure + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + args=["set-provider"], + provider="anthropic", + api_key="test-key", + tool_manager=Mock(servers=[]), + ) + + assert result.success is False + + @pytest.mark.asyncio + async def test_execute_backends_with_env_override(self, command): + """Test execution with 'backends' when env override is set.""" + import os + from mcp_cli.auth import TokenStoreBackend + + with patch.dict(os.environ, {"MCP_CLI_TOKEN_BACKEND": "encrypted"}): + with patch( + "mcp_cli.commands.tokens.token.TokenStoreFactory" + ) as mock_factory: + mock_factory.get_available_backends.return_value = [ + TokenStoreBackend.ENCRYPTED_FILE + ] + with patch( + "mcp_cli.commands.tokens.token.TokenStoreBackend" + ) as mock_backend: + mock_backend.return_value = TokenStoreBackend.ENCRYPTED_FILE + mock_backend.side_effect = ( + lambda x: TokenStoreBackend.ENCRYPTED_FILE + if x == "encrypted" + else TokenStoreBackend.KEYCHAIN + ) + with patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + result = await command.execute( + args=["backends"], tool_manager=Mock(servers=[]) + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_backends_with_invalid_env_override(self, command): + """Test execution with 'backends' when env override is invalid.""" + import os + from mcp_cli.auth import TokenStoreBackend + + with patch.dict(os.environ, {"MCP_CLI_TOKEN_BACKEND": "invalid_backend"}): + with patch( + "mcp_cli.commands.tokens.token.TokenStoreFactory" + ) as mock_factory: + mock_factory.get_available_backends.return_value = [ + TokenStoreBackend.ENCRYPTED_FILE + ] + 
mock_factory._detect_backend.return_value = ( + TokenStoreBackend.ENCRYPTED_FILE + ) + with patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + result = await command.execute( + args=["backends"], tool_manager=Mock(servers=[]) + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_list_with_no_servers_configured(self, command): + """Test execution with 'list' showing no servers message.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.registry.list_tokens.return_value = [] + mock_manager.load_tokens.return_value = None + mock_mgr.return_value = mock_manager + with patch( + "mcp_cli.auth.provider_tokens.list_all_provider_tokens" + ) as mock_list: + mock_list.return_value = {} + with patch("mcp_cli.commands.tokens.token.output"): + with patch("mcp_cli.commands.tokens.token.format_table"): + result = await command.execute( + show_providers=False, + show_oauth=True, + tool_manager=Mock(servers=[]), + ) + + assert result.success is True + + @pytest.mark.asyncio + async def test_execute_set_with_value_prompt(self, command): + """Test execution with 'set' prompting for value.""" + with patch("mcp_cli.commands.tokens.token._get_token_manager") as mock_mgr: + mock_manager = Mock() + mock_manager.token_store = Mock() + mock_manager.registry = Mock() + mock_mgr.return_value = mock_manager + with patch("getpass.getpass") as mock_getpass: + mock_getpass.return_value = "prompted-value" + with patch("mcp_cli.commands.tokens.token.output"): + result = await command.execute( + name="my-token", + tool_manager=Mock(servers=[]), + action="set", + ) + + assert result.success is True + + +class TestGetTokenManager: + """Test _get_token_manager function.""" + + def test_get_token_manager_with_env_override(self): + """Test _get_token_manager with environment variable override.""" + import os + from mcp_cli.commands.tokens.token 
import _get_token_manager + + with patch.dict(os.environ, {"MCP_CLI_TOKEN_BACKEND": "encrypted"}): + with patch("mcp_cli.commands.tokens.token.TokenManager") as mock_tm: + _get_token_manager() + # Should use encrypted backend + mock_tm.assert_called_once() + + def test_get_token_manager_with_invalid_env_override(self): + """Test _get_token_manager with invalid environment variable.""" + import os + from mcp_cli.commands.tokens.token import _get_token_manager + + with patch.dict(os.environ, {"MCP_CLI_TOKEN_BACKEND": "invalid_backend"}): + with patch("mcp_cli.commands.tokens.token.get_config") as mock_config: + mock_config.return_value.token_store_backend = "encrypted" + with patch("mcp_cli.commands.tokens.token.TokenManager") as mock_tm: + _get_token_manager() + mock_tm.assert_called_once() + + def test_get_token_manager_config_error(self): + """Test _get_token_manager when config raises error.""" + import os + from mcp_cli.commands.tokens.token import _get_token_manager, TokenStoreBackend + + # Clear the env var so it falls through to config + with patch.dict(os.environ, {}, clear=True): + if "MCP_CLI_TOKEN_BACKEND" in os.environ: + del os.environ["MCP_CLI_TOKEN_BACKEND"] + with patch("mcp_cli.commands.tokens.token.get_config") as mock_config: + mock_config.side_effect = Exception("Config error") + with patch("mcp_cli.commands.tokens.token.TokenManager") as mock_tm: + _get_token_manager() + # Should default to AUTO + call_args = mock_tm.call_args + assert call_args.kwargs["backend"] == TokenStoreBackend.AUTO diff --git a/tests/commands/definitions/test_tool_history_command.py b/tests/commands/definitions/test_tool_history_command.py index 0d8f7e2a..7258aaa2 100644 --- a/tests/commands/definitions/test_tool_history_command.py +++ b/tests/commands/definitions/test_tool_history_command.py @@ -4,7 +4,7 @@ import json from unittest.mock import MagicMock, patch -from mcp_cli.commands.definitions.tool_history import ToolHistoryCommand +from 
mcp_cli.commands.tools.tool_history import ToolHistoryCommand from mcp_cli.commands.base import CommandMode @@ -88,9 +88,9 @@ async def test_tool_history_empty_history(tool_history_command): @pytest.mark.asyncio async def test_tool_history_table_view(tool_history_command, mock_chat_context): """Test default table view of tool history.""" - with patch("mcp_cli.commands.definitions.tool_history.output") as mock_output: + with patch("mcp_cli.commands.tools.tool_history.output") as mock_output: with patch( - "mcp_cli.commands.definitions.tool_history.format_table" + "mcp_cli.commands.tools.tool_history.format_table" ) as mock_format_table: mock_format_table.return_value = "formatted_table" @@ -110,9 +110,9 @@ async def test_tool_history_table_view(tool_history_command, mock_chat_context): @pytest.mark.asyncio async def test_tool_history_with_limit(tool_history_command, mock_chat_context): """Test tool history with limit parameter.""" - with patch("mcp_cli.commands.definitions.tool_history.output") as mock_output: + with patch("mcp_cli.commands.tools.tool_history.output") as mock_output: with patch( - "mcp_cli.commands.definitions.tool_history.format_table" + "mcp_cli.commands.tools.tool_history.format_table" ) as mock_format_table: mock_format_table.return_value = "formatted_table" @@ -148,7 +148,7 @@ async def test_tool_history_json_output(tool_history_command, mock_chat_context) @pytest.mark.asyncio async def test_tool_history_row_detail(tool_history_command, mock_chat_context): """Test detailed view of specific row.""" - with patch("mcp_cli.commands.definitions.tool_history.output") as mock_output: + with patch("mcp_cli.commands.tools.tool_history.output") as mock_output: result = await tool_history_command.execute( chat_context=mock_chat_context, row=2 ) @@ -175,7 +175,7 @@ async def test_tool_history_invalid_row(tool_history_command, mock_chat_context) @pytest.mark.asyncio async def test_tool_history_row_from_args_list(tool_history_command, mock_chat_context): 
"""Test row number from args list.""" - with patch("mcp_cli.commands.definitions.tool_history.output") as mock_output: + with patch("mcp_cli.commands.tools.tool_history.output") as mock_output: result = await tool_history_command.execute( chat_context=mock_chat_context, args=["1"] ) @@ -189,7 +189,7 @@ async def test_tool_history_row_from_args_string( tool_history_command, mock_chat_context ): """Test row number from args string.""" - with patch("mcp_cli.commands.definitions.tool_history.output") as mock_output: + with patch("mcp_cli.commands.tools.tool_history.output") as mock_output: result = await tool_history_command.execute( chat_context=mock_chat_context, args="3" ) @@ -201,9 +201,9 @@ async def test_tool_history_row_from_args_string( @pytest.mark.asyncio async def test_tool_history_invalid_args(tool_history_command, mock_chat_context): """Test with non-numeric args.""" - with patch("mcp_cli.commands.definitions.tool_history.output") as mock_output: + with patch("mcp_cli.commands.tools.tool_history.output") as mock_output: with patch( - "mcp_cli.commands.definitions.tool_history.format_table" + "mcp_cli.commands.tools.tool_history.format_table" ) as mock_format_table: mock_format_table.return_value = "formatted_table" @@ -229,9 +229,9 @@ async def test_tool_history_truncate_long_arguments(tool_history_command): } ] - with patch("mcp_cli.commands.definitions.tool_history.output"): + with patch("mcp_cli.commands.tools.tool_history.output"): with patch( - "mcp_cli.commands.definitions.tool_history.format_table" + "mcp_cli.commands.tools.tool_history.format_table" ) as mock_format_table: mock_format_table.return_value = "formatted_table" diff --git a/tests/commands/definitions/test_tools_command_extended.py b/tests/commands/definitions/test_tools_command_extended.py index 5d23758e..32893c7e 100644 --- a/tests/commands/definitions/test_tools_command_extended.py +++ b/tests/commands/definitions/test_tools_command_extended.py @@ -3,7 +3,7 @@ import pytest from 
unittest.mock import patch, MagicMock, AsyncMock -from mcp_cli.commands.definitions.tools import ToolsCommand +from mcp_cli.commands.tools.tools import ToolsCommand class TestToolsCommand: @@ -34,7 +34,7 @@ async def test_tools_list_all(self, command): mock_tm = MagicMock() mock_tm.get_unique_tools = AsyncMock(return_value=[mock_tool]) - with patch("mcp_cli.commands.definitions.tools.get_context") as mock_get_ctx: + with patch("mcp_cli.commands.tools.tools.get_context") as mock_get_ctx: mock_ctx = MagicMock() mock_ctx.tool_manager = mock_tm mock_get_ctx.return_value = mock_ctx @@ -65,7 +65,7 @@ async def test_tools_filter_by_server(self, command): mock_tm = MagicMock() mock_tm.get_unique_tools = AsyncMock(return_value=[mock_tool1, mock_tool2]) - with patch("mcp_cli.commands.definitions.tools.get_context") as mock_get_ctx: + with patch("mcp_cli.commands.tools.tools.get_context") as mock_get_ctx: mock_ctx = MagicMock() mock_ctx.tool_manager = mock_tm mock_get_ctx.return_value = mock_ctx @@ -97,7 +97,7 @@ async def test_tools_show_detail(self, command): mock_tm = MagicMock() mock_tm.get_unique_tools = AsyncMock(return_value=[mock_tool]) - with patch("mcp_cli.commands.definitions.tools.get_context") as mock_get_ctx: + with patch("mcp_cli.commands.tools.tools.get_context") as mock_get_ctx: mock_ctx = MagicMock() mock_ctx.tool_manager = mock_tm mock_get_ctx.return_value = mock_ctx @@ -110,7 +110,7 @@ async def test_tools_show_detail(self, command): @pytest.mark.asyncio async def test_tools_no_manager(self, command): """Test when no tool manager is available.""" - with patch("mcp_cli.commands.definitions.tools.get_context") as mock_get_ctx: + with patch("mcp_cli.commands.tools.tools.get_context") as mock_get_ctx: mock_get_ctx.return_value = None result = await command.execute() @@ -131,7 +131,7 @@ async def test_tools_invalid_filter(self, command): mock_tm = MagicMock() mock_tm.get_unique_tools = AsyncMock(return_value=[mock_tool]) - with 
patch("mcp_cli.commands.definitions.tools.get_context") as mock_get_ctx: + with patch("mcp_cli.commands.tools.tools.get_context") as mock_get_ctx: mock_ctx = MagicMock() mock_ctx.tool_manager = mock_tm mock_get_ctx.return_value = mock_ctx @@ -154,7 +154,7 @@ async def test_tools_raw_output(self, command): mock_tm = MagicMock() mock_tm.get_unique_tools = AsyncMock(return_value=[mock_tool]) - with patch("mcp_cli.commands.definitions.tools.get_context") as mock_get_ctx: + with patch("mcp_cli.commands.tools.tools.get_context") as mock_get_ctx: mock_ctx = MagicMock() mock_ctx.tool_manager = mock_tm mock_get_ctx.return_value = mock_ctx @@ -178,7 +178,7 @@ async def test_tools_with_details_flag(self, command): mock_tm = MagicMock() mock_tm.get_unique_tools = AsyncMock(return_value=[mock_tool]) - with patch("mcp_cli.commands.definitions.tools.get_context") as mock_get_ctx: + with patch("mcp_cli.commands.tools.tools.get_context") as mock_get_ctx: mock_ctx = MagicMock() mock_ctx.tool_manager = mock_tm mock_get_ctx.return_value = mock_ctx diff --git a/tests/commands/definitions/test_tools_command_simple.py b/tests/commands/definitions/test_tools_command_simple.py index 5131cefb..319c6a91 100644 --- a/tests/commands/definitions/test_tools_command_simple.py +++ b/tests/commands/definitions/test_tools_command_simple.py @@ -2,7 +2,7 @@ import pytest from unittest.mock import patch, MagicMock, AsyncMock -from mcp_cli.commands.definitions.tools import ToolsCommand +from mcp_cli.commands.tools.tools import ToolsCommand from mcp_cli.commands.base import UnifiedCommand @@ -45,7 +45,7 @@ async def test_execute_no_filter(self, command): mock_tm = MagicMock() mock_tm.get_unique_tools = AsyncMock(return_value=[mock_tool]) - with patch("mcp_cli.commands.definitions.tools.get_context") as mock_get_ctx: + with patch("mcp_cli.commands.tools.tools.get_context") as mock_get_ctx: mock_ctx = MagicMock() mock_ctx.tool_manager = mock_tm mock_get_ctx.return_value = mock_ctx @@ -77,7 +77,7 @@ async 
def test_execute_with_server_filter(self, command): mock_tm = MagicMock() mock_tm.get_unique_tools = AsyncMock(return_value=[mock_tool1, mock_tool2]) - with patch("mcp_cli.commands.definitions.tools.get_context") as mock_get_ctx: + with patch("mcp_cli.commands.tools.tools.get_context") as mock_get_ctx: mock_ctx = MagicMock() mock_ctx.tool_manager = mock_tm mock_get_ctx.return_value = mock_ctx @@ -106,7 +106,7 @@ async def test_execute_with_tool_filter(self, command): mock_tm = MagicMock() mock_tm.get_unique_tools = AsyncMock(return_value=[mock_tool]) - with patch("mcp_cli.commands.definitions.tools.get_context") as mock_get_ctx: + with patch("mcp_cli.commands.tools.tools.get_context") as mock_get_ctx: mock_ctx = MagicMock() mock_ctx.tool_manager = mock_tm mock_get_ctx.return_value = mock_ctx diff --git a/tests/commands/definitions/test_tools_coverage.py b/tests/commands/definitions/test_tools_coverage.py index f1928d45..92bf7ec3 100644 --- a/tests/commands/definitions/test_tools_coverage.py +++ b/tests/commands/definitions/test_tools_coverage.py @@ -3,7 +3,7 @@ import pytest from unittest.mock import MagicMock, AsyncMock, patch -from mcp_cli.commands.definitions.tools import ToolsCommand +from mcp_cli.commands.tools.tools import ToolsCommand class TestToolsCommandCoverage: @@ -32,7 +32,7 @@ async def test_execute_no_tools_available(self): mock_context.tool_manager = mock_tm with patch( - "mcp_cli.commands.definitions.tools.get_context", return_value=mock_context + "mcp_cli.commands.tools.tools.get_context", return_value=mock_context ): result = await self.tools_cmd.execute() @@ -57,9 +57,9 @@ async def test_execute_with_args_as_string(self): mock_context.tool_manager = mock_tm with patch( - "mcp_cli.commands.definitions.tools.get_context", return_value=mock_context + "mcp_cli.commands.tools.tools.get_context", return_value=mock_context ): - with patch("mcp_cli.commands.definitions.tools.output"): + with patch("mcp_cli.commands.tools.tools.output"): result = await 
self.tools_cmd.execute(args="test_tool") assert result.success is True @@ -73,7 +73,7 @@ async def test_execute_with_exception(self): mock_context.tool_manager = mock_tm with patch( - "mcp_cli.commands.definitions.tools.get_context", return_value=mock_context + "mcp_cli.commands.tools.tools.get_context", return_value=mock_context ): result = await self.tools_cmd.execute() @@ -99,9 +99,9 @@ async def test_show_tool_details_with_echo_tool(self): mock_context.tool_manager = mock_tm with patch( - "mcp_cli.commands.definitions.tools.get_context", return_value=mock_context + "mcp_cli.commands.tools.tools.get_context", return_value=mock_context ): - with patch("mcp_cli.commands.definitions.tools.output"): + with patch("mcp_cli.commands.tools.tools.output"): result = await self.tools_cmd.execute(filter="echo_custom") assert result.success is True @@ -124,9 +124,9 @@ async def test_show_tool_details_with_sql_tool(self): mock_context.tool_manager = mock_tm with patch( - "mcp_cli.commands.definitions.tools.get_context", return_value=mock_context + "mcp_cli.commands.tools.tools.get_context", return_value=mock_context ): - with patch("mcp_cli.commands.definitions.tools.output"): + with patch("mcp_cli.commands.tools.tools.output"): result = await self.tools_cmd.execute(filter="custom_query_tool") assert result.success is True @@ -159,9 +159,9 @@ async def test_show_tool_details_with_parameters_and_defaults(self): mock_context.tool_manager = mock_tm with patch( - "mcp_cli.commands.definitions.tools.get_context", return_value=mock_context + "mcp_cli.commands.tools.tools.get_context", return_value=mock_context ): - with patch("mcp_cli.commands.definitions.tools.output"): + with patch("mcp_cli.commands.tools.tools.output"): result = await self.tools_cmd.execute(filter="test_tool") assert result.success is True @@ -184,9 +184,9 @@ async def test_show_tool_details_without_parameters(self): mock_context.tool_manager = mock_tm with patch( - 
"mcp_cli.commands.definitions.tools.get_context", return_value=mock_context + "mcp_cli.commands.tools.tools.get_context", return_value=mock_context ): - with patch("mcp_cli.commands.definitions.tools.output"): + with patch("mcp_cli.commands.tools.tools.output"): result = await self.tools_cmd.execute(filter="simple_tool") assert result.success is True @@ -209,9 +209,9 @@ async def test_show_tools_table_with_truncation(self): mock_context.tool_manager = mock_tm with patch( - "mcp_cli.commands.definitions.tools.get_context", return_value=mock_context + "mcp_cli.commands.tools.tools.get_context", return_value=mock_context ): - with patch("mcp_cli.commands.definitions.tools.output"): + with patch("mcp_cli.commands.tools.tools.output"): result = await self.tools_cmd.execute() assert result.success is True @@ -234,9 +234,9 @@ async def test_show_tools_table_with_details_no_truncation(self): mock_context.tool_manager = mock_tm with patch( - "mcp_cli.commands.definitions.tools.get_context", return_value=mock_context + "mcp_cli.commands.tools.tools.get_context", return_value=mock_context ): - with patch("mcp_cli.commands.definitions.tools.output"): + with patch("mcp_cli.commands.tools.tools.output"): result = await self.tools_cmd.execute(details=True) assert result.success is True @@ -259,9 +259,9 @@ async def test_server_name_detection_with_namespace(self): mock_context.tool_manager = mock_tm with patch( - "mcp_cli.commands.definitions.tools.get_context", return_value=mock_context + "mcp_cli.commands.tools.tools.get_context", return_value=mock_context ): - with patch("mcp_cli.commands.definitions.tools.output"): + with patch("mcp_cli.commands.tools.tools.output"): result = await self.tools_cmd.execute() assert result.success is True @@ -284,9 +284,9 @@ async def test_server_name_detection_with_echo_pattern(self): mock_context.tool_manager = mock_tm with patch( - "mcp_cli.commands.definitions.tools.get_context", return_value=mock_context + 
"mcp_cli.commands.tools.tools.get_context", return_value=mock_context ): - with patch("mcp_cli.commands.definitions.tools.output"): + with patch("mcp_cli.commands.tools.tools.output"): result = await self.tools_cmd.execute() assert result.success is True @@ -309,9 +309,9 @@ async def test_server_name_detection_with_sql_pattern(self): mock_context.tool_manager = mock_tm with patch( - "mcp_cli.commands.definitions.tools.get_context", return_value=mock_context + "mcp_cli.commands.tools.tools.get_context", return_value=mock_context ): - with patch("mcp_cli.commands.definitions.tools.output"): + with patch("mcp_cli.commands.tools.tools.output"): result = await self.tools_cmd.execute() assert result.success is True diff --git a/tests/commands/definitions/test_verbose_command.py b/tests/commands/definitions/test_verbose_command.py index e4ef7e33..50ffbd03 100644 --- a/tests/commands/definitions/test_verbose_command.py +++ b/tests/commands/definitions/test_verbose_command.py @@ -2,7 +2,7 @@ import pytest from unittest.mock import Mock -from mcp_cli.commands.definitions.verbose import VerboseCommand +from mcp_cli.commands.core.verbose import VerboseCommand from mcp_cli.commands.base import CommandMode diff --git a/tests/commands/models/test_cmd_models.py b/tests/commands/models/test_cmd_models.py new file mode 100644 index 00000000..90f09a39 --- /dev/null +++ b/tests/commands/models/test_cmd_models.py @@ -0,0 +1,280 @@ +# tests/commands/models/test_cmd_models.py +"""Tests for cmd command models.""" + +from mcp_cli.commands.models.cmd import ( + CmdActionParams, + LLMResponse, + Message, + MessageRole, + ToolCall, + ToolCallFunction, +) + + +class TestMessageRole: + """Test MessageRole enum.""" + + def test_message_role_values(self): + """Test all MessageRole values exist.""" + assert MessageRole.SYSTEM.value == "system" + assert MessageRole.USER.value == "user" + assert MessageRole.ASSISTANT.value == "assistant" + assert MessageRole.TOOL.value == "tool" + + def 
test_message_role_is_str(self): + """Test MessageRole is a string enum.""" + assert isinstance(MessageRole.USER, str) + assert MessageRole.USER == "user" + + +class TestToolCallFunction: + """Test ToolCallFunction model.""" + + def test_creation_with_string_arguments(self): + """Test creating with string arguments.""" + func = ToolCallFunction(name="get_weather", arguments='{"city": "NYC"}') + + assert func.name == "get_weather" + assert func.arguments == '{"city": "NYC"}' + + def test_creation_with_dict_arguments(self): + """Test creating with dict arguments.""" + func = ToolCallFunction(name="get_weather", arguments={"city": "NYC"}) + + assert func.name == "get_weather" + assert func.arguments == {"city": "NYC"} + + +class TestToolCall: + """Test ToolCall model.""" + + def test_creation(self): + """Test creating a tool call.""" + tc = ToolCall( + id="call_123", + function=ToolCallFunction(name="test_tool", arguments="{}"), + ) + + assert tc.id == "call_123" + assert tc.function.name == "test_tool" + + def test_from_dict(self): + """Test creating from chuk-llm dict format.""" + data = { + "id": "call_456", + "function": {"name": "get_data", "arguments": '{"query": "test"}'}, + } + + tc = ToolCall.from_dict(data) + + assert tc.id == "call_456" + assert tc.function.name == "get_data" + assert tc.function.arguments == '{"query": "test"}' + + def test_from_dict_missing_fields(self): + """Test from_dict with missing fields uses defaults.""" + data = {} + + tc = ToolCall.from_dict(data) + + assert tc.id == "" + assert tc.function.name == "" + assert tc.function.arguments == "{}" + + +class TestMessage: + """Test Message model.""" + + def test_creation_user_message(self): + """Test creating a user message.""" + msg = Message(role=MessageRole.USER, content="Hello") + + assert msg.role == MessageRole.USER + assert msg.content == "Hello" + assert msg.tool_calls is None + assert msg.tool_call_id is None + assert msg.name is None + + def 
test_creation_assistant_with_tool_calls(self): + """Test creating assistant message with tool calls.""" + tc = ToolCall( + id="call_123", + function=ToolCallFunction(name="test", arguments="{}"), + ) + msg = Message( + role=MessageRole.ASSISTANT, + content="", + tool_calls=[tc], + ) + + assert msg.role == MessageRole.ASSISTANT + assert len(msg.tool_calls) == 1 + assert msg.tool_calls[0].id == "call_123" + + def test_creation_tool_message(self): + """Test creating a tool response message.""" + msg = Message( + role=MessageRole.TOOL, + content='{"result": "success"}', + tool_call_id="call_123", + name="test_tool", + ) + + assert msg.role == MessageRole.TOOL + assert msg.tool_call_id == "call_123" + assert msg.name == "test_tool" + + def test_from_dict_simple(self): + """Test from_dict with simple message.""" + data = {"role": "user", "content": "Hello world"} + + msg = Message.from_dict(data) + + assert msg.role == MessageRole.USER + assert msg.content == "Hello world" + + def test_from_dict_with_tool_calls(self): + """Test from_dict with tool calls.""" + data = { + "role": "assistant", + "content": "", + "tool_calls": [ + {"id": "call_1", "function": {"name": "tool1", "arguments": "{}"}}, + {"id": "call_2", "function": {"name": "tool2", "arguments": "{}"}}, + ], + } + + msg = Message.from_dict(data) + + assert msg.role == MessageRole.ASSISTANT + assert len(msg.tool_calls) == 2 + assert msg.tool_calls[0].function.name == "tool1" + assert msg.tool_calls[1].function.name == "tool2" + + def test_from_dict_tool_message(self): + """Test from_dict with tool response message.""" + data = { + "role": "tool", + "content": "result data", + "tool_call_id": "call_123", + "name": "my_tool", + } + + msg = Message.from_dict(data) + + assert msg.role == MessageRole.TOOL + assert msg.content == "result data" + assert msg.tool_call_id == "call_123" + assert msg.name == "my_tool" + + def test_from_dict_missing_content(self): + """Test from_dict with missing content defaults to empty 
string.""" + data = {"role": "user"} + + msg = Message.from_dict(data) + + assert msg.content == "" + + +class TestLLMResponse: + """Test LLMResponse model.""" + + def test_creation_simple(self): + """Test creating a simple response.""" + resp = LLMResponse(response="Hello!") + + assert resp.response == "Hello!" + assert resp.tool_calls == [] + + def test_creation_with_tool_calls(self): + """Test creating response with tool calls.""" + tc = ToolCall( + id="call_123", + function=ToolCallFunction(name="test", arguments="{}"), + ) + resp = LLMResponse(response="", tool_calls=[tc]) + + assert resp.response == "" + assert len(resp.tool_calls) == 1 + + def test_from_dict_simple(self): + """Test from_dict with simple response.""" + data = {"response": "Hello world"} + + resp = LLMResponse.from_dict(data) + + assert resp.response == "Hello world" + assert resp.tool_calls == [] + + def test_from_dict_with_tool_calls(self): + """Test from_dict with tool calls.""" + data = { + "response": "", + "tool_calls": [ + {"id": "call_1", "function": {"name": "tool1", "arguments": "{}"}}, + ], + } + + resp = LLMResponse.from_dict(data) + + assert resp.response == "" + assert len(resp.tool_calls) == 1 + assert resp.tool_calls[0].function.name == "tool1" + + def test_from_dict_empty_tool_calls(self): + """Test from_dict with empty tool_calls list.""" + data = {"response": "test", "tool_calls": []} + + resp = LLMResponse.from_dict(data) + + assert resp.tool_calls == [] + + def test_from_dict_missing_response(self): + """Test from_dict with missing response defaults to empty string.""" + data = {} + + resp = LLMResponse.from_dict(data) + + assert resp.response == "" + + +class TestCmdActionParams: + """Test CmdActionParams model.""" + + def test_default_params(self): + """Test default parameter values.""" + params = CmdActionParams() + + assert params.input_file is None + assert params.output_file is None + assert params.prompt is None + assert params.tool is None + assert params.tool_args 
is None + assert params.system_prompt is None + assert params.raw is False + assert params.single_turn is False + assert params.max_turns == 100 + + def test_custom_params(self): + """Test custom parameter values.""" + params = CmdActionParams( + input_file="input.txt", + output_file="output.txt", + prompt="Do something", + tool="my_tool", + tool_args='{"arg": "value"}', + system_prompt="You are helpful", + raw=True, + single_turn=True, + max_turns=10, + ) + + assert params.input_file == "input.txt" + assert params.output_file == "output.txt" + assert params.prompt == "Do something" + assert params.tool == "my_tool" + assert params.tool_args == '{"arg": "value"}' + assert params.system_prompt == "You are helpful" + assert params.raw is True + assert params.single_turn is True + assert params.max_turns == 10 diff --git a/tests/commands/models/test_server_models.py b/tests/commands/models/test_server_models.py index 4663143f..d42b50e8 100644 --- a/tests/commands/models/test_server_models.py +++ b/tests/commands/models/test_server_models.py @@ -2,10 +2,12 @@ import pytest from pydantic import ValidationError + from mcp_cli.commands.models.server import ( ServerActionParams, - ServerStatusInfo, + ServerCapabilities, ServerPerformanceInfo, + ServerStatusInfo, ) @@ -121,3 +123,75 @@ def test_required_fields(self): """Test that icon and latency are required.""" with pytest.raises(ValidationError): ServerPerformanceInfo(ping_ms=25.5) + + +class TestServerCapabilities: + """Test ServerCapabilities model.""" + + def test_default_capabilities(self): + """Test default values.""" + caps = ServerCapabilities() + + assert caps.tools is False + assert caps.prompts is False + assert caps.resources is False + assert caps.experimental == {} + + def test_custom_capabilities(self): + """Test with custom values.""" + caps = ServerCapabilities( + tools=True, + prompts=True, + resources=True, + experimental={"events": True, "streaming": True}, + ) + + assert caps.tools is True + assert 
caps.prompts is True + assert caps.resources is True + assert caps.experimental["events"] is True + + def test_has_events_property(self): + """Test has_events property.""" + caps_no_events = ServerCapabilities() + assert caps_no_events.has_events is False + + caps_with_events = ServerCapabilities(experimental={"events": True}) + assert caps_with_events.has_events is True + + caps_events_false = ServerCapabilities(experimental={"events": False}) + assert caps_events_false.has_events is False + + def test_has_streaming_property(self): + """Test has_streaming property.""" + caps_no_streaming = ServerCapabilities() + assert caps_no_streaming.has_streaming is False + + caps_with_streaming = ServerCapabilities(experimental={"streaming": True}) + assert caps_with_streaming.has_streaming is True + + def test_to_display_string_empty(self): + """Test to_display_string with no capabilities.""" + caps = ServerCapabilities() + assert caps.to_display_string() == "None" + + def test_to_display_string_basic(self): + """Test to_display_string with basic capabilities.""" + caps = ServerCapabilities(tools=True) + assert caps.to_display_string() == "Tools" + + caps2 = ServerCapabilities(tools=True, prompts=True) + assert caps2.to_display_string() == "Tools, Prompts" + + caps3 = ServerCapabilities(tools=True, prompts=True, resources=True) + assert caps3.to_display_string() == "Tools, Prompts, Resources" + + def test_to_display_string_with_experimental(self): + """Test to_display_string with experimental capabilities.""" + caps = ServerCapabilities( + tools=True, experimental={"events": True, "streaming": True} + ) + display = caps.to_display_string() + assert "Tools" in display + assert "Events*" in display + assert "Streaming*" in display diff --git a/tests/commands/test_enums.py b/tests/commands/test_enums.py new file mode 100644 index 00000000..6d1db67c --- /dev/null +++ b/tests/commands/test_enums.py @@ -0,0 +1,173 @@ +# tests/commands/test_enums.py +"""Tests for commands/enums.py 
- Enum definitions.""" + +from mcp_cli.commands.enums import ( + CommandAction, + ErrorMessages, + OutputFormat, + ProviderCommand, + ServerCommand, + SpecialValues, + SuccessMessages, + TokenNamespace, + ToolCommand, +) +from mcp_cli.commands.models.provider import TokenSource +from mcp_cli.tools.models import TransportType + + +class TestCommandAction: + """Test CommandAction enum.""" + + def test_command_action_values(self): + """Test all CommandAction values exist.""" + assert CommandAction.LIST.value == "list" + assert CommandAction.ADD.value == "add" + assert CommandAction.REMOVE.value == "remove" + assert CommandAction.SET.value == "set" + assert CommandAction.GET.value == "get" + assert CommandAction.DELETE.value == "delete" + assert CommandAction.CREATE.value == "create" + assert CommandAction.UPDATE.value == "update" + assert CommandAction.SHOW.value == "show" + assert CommandAction.ENABLE.value == "enable" + assert CommandAction.DISABLE.value == "disable" + assert CommandAction.VALIDATE.value == "validate" + assert CommandAction.STATUS.value == "status" + assert CommandAction.DETAILS.value == "details" + assert CommandAction.REFRESH.value == "refresh" + assert CommandAction.CONFIG.value == "config" + assert CommandAction.DIAGNOSTIC.value == "diagnostic" + + def test_command_action_is_str(self): + """Test CommandAction is a string enum.""" + assert isinstance(CommandAction.LIST, str) + assert CommandAction.LIST == "list" + + +class TestTokenNamespace: + """Test TokenNamespace enum.""" + + def test_token_namespace_values(self): + """Test all TokenNamespace values exist.""" + assert TokenNamespace.GENERIC.value == "generic" + assert TokenNamespace.PROVIDER.value == "provider" + assert TokenNamespace.BEARER.value == "bearer" + assert TokenNamespace.API_KEY.value == "api-key" + assert TokenNamespace.OAUTH.value == "oauth" + + def test_token_namespace_is_str(self): + """Test TokenNamespace is a string enum.""" + assert isinstance(TokenNamespace.GENERIC, str) + 
+ +class TestTransportType: + """Test TransportType enum.""" + + def test_transport_type_values(self): + """Test all TransportType values exist.""" + assert TransportType.STDIO.value == "stdio" + assert TransportType.SSE.value == "sse" + assert TransportType.HTTP.value == "http" + + +class TestOutputFormat: + """Test OutputFormat enum.""" + + def test_output_format_values(self): + """Test all OutputFormat values exist.""" + assert OutputFormat.JSON.value == "json" + assert OutputFormat.TABLE.value == "table" + assert OutputFormat.TEXT.value == "text" + + +class TestProviderCommand: + """Test ProviderCommand enum.""" + + def test_provider_command_values(self): + """Test all ProviderCommand values exist.""" + assert ProviderCommand.LIST.value == "list" + assert ProviderCommand.ADD.value == "add" + assert ProviderCommand.REMOVE.value == "remove" + assert ProviderCommand.SET.value == "set" + assert ProviderCommand.CONFIG.value == "config" + assert ProviderCommand.DIAGNOSTIC.value == "diagnostic" + assert ProviderCommand.CUSTOM.value == "custom" + + +class TestTokenSource: + """Test TokenSource enum.""" + + def test_token_source_values(self): + """Test all TokenSource values exist.""" + assert TokenSource.ENV.value == "env" + assert TokenSource.STORAGE.value == "storage" + assert TokenSource.NONE.value == "none" + + +class TestServerCommand: + """Test ServerCommand enum.""" + + def test_server_command_values(self): + """Test all ServerCommand values exist.""" + assert ServerCommand.LIST.value == "list" + assert ServerCommand.ADD.value == "add" + assert ServerCommand.REMOVE.value == "remove" + assert ServerCommand.ENABLE.value == "enable" + assert ServerCommand.DISABLE.value == "disable" + assert ServerCommand.STATUS.value == "status" + + +class TestToolCommand: + """Test ToolCommand enum.""" + + def test_tool_command_values(self): + """Test all ToolCommand values exist.""" + assert ToolCommand.LIST.value == "list" + assert ToolCommand.DETAILS.value == "details" + assert 
ToolCommand.VALIDATE.value == "validate" + assert ToolCommand.STATUS.value == "status" + assert ToolCommand.ENABLE.value == "enable" + assert ToolCommand.DISABLE.value == "disable" + assert ToolCommand.LIST_DISABLED.value == "list-disabled" + assert ToolCommand.AUTO_FIX.value == "auto-fix" + assert ToolCommand.CLEAR_VALIDATION.value == "clear-validation" + assert ToolCommand.VALIDATION_ERRORS.value == "validation-errors" + + +class TestSpecialValues: + """Test SpecialValues constants.""" + + def test_special_values(self): + """Test all SpecialValues constants exist.""" + assert SpecialValues.STDIN == "-" + assert SpecialValues.STDOUT == "-" + assert SpecialValues.OLLAMA == "ollama" + assert SpecialValues.USER == "User" + + +class TestErrorMessages: + """Test ErrorMessages constants.""" + + def test_error_messages(self): + """Test all ErrorMessages constants exist.""" + assert ErrorMessages.NO_CONTEXT == "Context not initialized" + assert ErrorMessages.NO_TOOL_MANAGER == "Tool manager not available" + assert ErrorMessages.NO_MODEL_MANAGER == "Model manager not available" + assert ErrorMessages.NO_SERVER_MANAGER == "Server manager not available" + assert ErrorMessages.INVALID_PROVIDER == "Unknown provider" + assert ErrorMessages.INVALID_SERVER == "Unknown server" + assert ErrorMessages.INVALID_TOOL == "Unknown tool" + assert ErrorMessages.NO_OPERATION == "No operation specified" + + +class TestSuccessMessages: + """Test SuccessMessages constants.""" + + def test_success_messages(self): + """Test all SuccessMessages constants exist.""" + assert SuccessMessages.TOKEN_STORED == "Token stored successfully" + assert SuccessMessages.TOKEN_DELETED == "Token deleted successfully" + assert SuccessMessages.SERVER_ADDED == "Server added successfully" + assert SuccessMessages.SERVER_REMOVED == "Server removed successfully" + assert SuccessMessages.PROVIDER_SWITCHED == "Provider switched successfully" diff --git a/tests/commands/test_main.py b/tests/commands/test_main.py index 
f8d70358..06d12307 100644 --- a/tests/commands/test_main.py +++ b/tests/commands/test_main.py @@ -14,6 +14,7 @@ def test_ping_command_exists(): assert result.exit_code == 0 assert "Test connectivity to MCP servers" in result.stdout + def test_all_direct_commands_are_registered(): """ Test that all commands intended for direct registration are present. @@ -40,4 +41,6 @@ def test_all_direct_commands_are_registered(): registered_commands = [cmd.name for cmd in app.registered_commands if cmd.name] for cmd_name in expected_commands: - assert cmd_name in registered_commands, f"Command '{cmd_name}' is not registered in main.py" + assert cmd_name in registered_commands, ( + f"Command '{cmd_name}' is not registered in main.py" + ) diff --git a/tests/commands/test_registry.py b/tests/commands/test_registry.py index 836f1c09..9a191174 100644 --- a/tests/commands/test_registry.py +++ b/tests/commands/test_registry.py @@ -273,7 +273,6 @@ def test_clear_registry(self): assert self.registry.get("test") is None assert len(self.registry._commands) == 0 - assert len(self.registry._groups) == 0 def test_reset_singleton(self): """Test resetting the singleton instance.""" @@ -287,16 +286,15 @@ def test_reset_singleton(self): assert registry1 is not registry2 assert registry2.get("test") is None - def test_register_to_nonexistent_group(self): - """Test registering a command to a non-existent group.""" + def test_register_with_group_param_ignored(self): + """Test that the deprecated group parameter is ignored.""" cmd = DummyCommand(name="subcommand") - # Try to register to a group that doesn't exist - # Should log warning and not register + # Group parameter is ignored - command is registered normally self.registry.register(cmd, group="nonexistent_group") - # Command should not be registered - assert self.registry.get("subcommand") is None + # Command should be registered (group param is ignored) + assert self.registry.get("subcommand") is cmd def 
test_get_command_names_with_hidden_commands(self): """Test that hidden commands are not in the command names list.""" @@ -325,20 +323,17 @@ def test_register_command_group_and_subcommands(self): # Register the group as a top-level command self.registry.register(group) - # Pre-register the group in _groups for subcommand registration - self.registry._groups["tools"] = group - - # Create and register subcommands + # Add subcommands via CommandGroup.add_subcommand() list_cmd = DummyCommand(name="list", description="List tools") call_cmd = DummyCommand(name="call", description="Call a tool") - self.registry.register(list_cmd, group="tools") - self.registry.register(call_cmd, group="tools") + group.add_subcommand(list_cmd) + group.add_subcommand(call_cmd) # Verify the group is registered assert self.registry.get("tools") is group - # Verify we can get subcommands + # Verify we can get subcommands via "group subcommand" syntax assert self.registry.get("tools list") is list_cmd assert self.registry.get("tools call") is call_cmd @@ -347,14 +342,13 @@ def test_get_subcommand_with_mode_filter(self): # Create a command group group = DummyCommandGroup(name="tools", modes=CommandMode.ALL) self.registry.register(group) - self.registry._groups["tools"] = group - # Create subcommands with different modes + # Add subcommands with different modes list_cmd = DummyCommand(name="list", modes=CommandMode.CHAT) call_cmd = DummyCommand(name="call", modes=CommandMode.CLI) - self.registry.register(list_cmd, group="tools") - self.registry.register(call_cmd, group="tools") + group.add_subcommand(list_cmd) + group.add_subcommand(call_cmd) # Get subcommand with mode filter assert self.registry.get("tools list", mode=CommandMode.CHAT) is list_cmd @@ -368,11 +362,10 @@ def test_get_nonexistent_subcommand(self): # Create a command group group = DummyCommandGroup(name="tools") self.registry.register(group) - self.registry._groups["tools"] = group - # Create a subcommand + # Add a subcommand 
list_cmd = DummyCommand(name="list") - self.registry.register(list_cmd, group="tools") + group.add_subcommand(list_cmd) # Try to get non-existent subcommand assert self.registry.get("tools nonexistent") is None diff --git a/tests/config/test_cli_options.py b/tests/config/test_cli_options.py index ea0a5d17..8f08ce9f 100644 --- a/tests/config/test_cli_options.py +++ b/tests/config/test_cli_options.py @@ -9,11 +9,16 @@ from mcp_cli.config.cli_options import ( load_config, extract_server_names, - inject_logging_env_vars, process_options, get_config_summary, ) -from mcp_cli.config.config_manager import MCPConfig +from mcp_cli.config import MCPConfig + + +# Stub for removed function - kept for backwards compatibility with tests +def inject_logging_env_vars(config, quiet=False): + """Stub for removed inject_logging_env_vars function.""" + pass @pytest.fixture @@ -53,7 +58,7 @@ def test_load_config_valid(valid_config): config = load_config(str(valid_config)) assert isinstance(config, MCPConfig) assert "ServerA" in config.servers - assert config.servers["ServerA"].command == "server-a-cmd" + assert config.servers["ServerA"]["command"] == "server-a-cmd" def test_load_config_missing(tmp_path): @@ -314,38 +319,13 @@ def test_process_options_http_server(mock_discovery, monkeypatch, tmp_path): assert os.environ["LLM_MODEL"] == "gpt-5" +@pytest.mark.skip(reason="Config modification removed - configs are now immutable") @patch("mcp_cli.config.cli_options.trigger_discovery_after_setup") def test_process_options_quiet_mode(mock_discovery, monkeypatch, tmp_path, caplog): """Test that quiet mode suppresses server noise.""" - mock_discovery.return_value = 0 - - config_content = { - "mcpServers": { - "QuietServer": {"command": "quiet-cmd", "env": {"EXISTING_VAR": "value"}} - } - } - config_file = tmp_path / "quiet_config.json" - config_file.write_text(json.dumps(config_content)) - - monkeypatch.delenv("LLM_PROVIDER", raising=False) - - # Process with quiet=True - servers_list, 
user_specified, server_names = process_options( - server="QuietServer", - disable_filesystem=False, - provider="ollama", - model="gpt-oss", - config_file=str(config_file), - quiet=True, - ) - - # When quiet=True, logging env vars should be injected - # This is tested indirectly - the function should complete without error - assert servers_list == ["QuietServer"] - - # The modified config should have been created - # Check that environment contains path to modified config - assert "MCP_CLI_MODIFIED_CONFIG" in os.environ + # This test is no longer relevant as process_options no longer creates + # modified configs - the original config is used directly + pass @patch("mcp_cli.config.cli_options.output") @@ -600,6 +580,9 @@ def test_load_config_with_content_but_invalid_json(self, tmp_path): assert config is None +@pytest.mark.skip( + reason="inject_logging_env_vars function was removed during refactoring" +) class TestInjectLoggingEnvVars: """Test inject_logging_env_vars function.""" @@ -803,42 +786,18 @@ def test_process_options_with_validation_errors( "Server1 missing command" in record.message for record in caplog.records ) + @pytest.mark.skip( + reason="MCPConfig is now immutable and doesn't have save_to_file method" + ) @patch("mcp_cli.config.cli_options.trigger_discovery_after_setup") @patch("mcp_cli.utils.preferences.get_preference_manager") def test_process_options_modified_config_save_error( self, mock_pref, mock_discovery, tmp_path, monkeypatch, caplog ): """Test handling of error when saving modified config.""" - mock_discovery.return_value = 0 - - mock_pm = MagicMock() - mock_pm.is_server_disabled.return_value = False - mock_pref.return_value = mock_pm - - config_content = {"mcpServers": {"Server1": {"command": "cmd1", "args": []}}} - config_file = tmp_path / "config.json" - config_file.write_text(json.dumps(config_content)) - - monkeypatch.delenv("LLM_PROVIDER", raising=False) - - # Mock save_to_file to raise exception - with patch.object( - MCPConfig, 
"save_to_file", side_effect=Exception("Save failed") - ): - with caplog.at_level(logging.WARNING): - servers_list, _, _ = process_options( - server="Server1", - disable_filesystem=True, - provider="openai", - model="gpt-4", - config_file=str(config_file), - ) - - # Should log warning about failed save - assert any( - "Failed to create modified config" in record.message - for record in caplog.records - ) + # This test is no longer relevant as MCPConfig is immutable + # and process_options no longer modifies or saves config files + pass class TestGetConfigSummary: diff --git a/tests/config/test_config_init.py b/tests/config/test_config_init.py new file mode 100644 index 00000000..9431843d --- /dev/null +++ b/tests/config/test_config_init.py @@ -0,0 +1,113 @@ +# tests/config/test_config_init.py +"""Tests for config/__init__.py module.""" + +import json +import pytest + +from mcp_cli.config import ( + load_runtime_config, + load_runtime_config_async, + RuntimeConfig, + ConfigOverride, +) + + +class TestLoadRuntimeConfig: + """Test load_runtime_config function.""" + + def test_load_runtime_config_default_path(self, tmp_path, monkeypatch): + """Test loading config with default path.""" + # Create a config file in a temp location + config_data = {"default_provider": "anthropic", "default_model": "claude-3"} + config_file = tmp_path / "server_config.json" + config_file.write_text(json.dumps(config_data)) + + monkeypatch.chdir(tmp_path) + + config = load_runtime_config() + assert isinstance(config, RuntimeConfig) + assert config.provider == "anthropic" + + def test_load_runtime_config_custom_path(self, tmp_path): + """Test loading config from custom path.""" + config_data = {"default_provider": "openai", "default_model": "gpt-4"} + config_file = tmp_path / "custom_config.json" + config_file.write_text(json.dumps(config_data)) + + config = load_runtime_config(str(config_file)) + assert isinstance(config, RuntimeConfig) + assert config.provider == "openai" + assert 
config.model == "gpt-4" + + def test_load_runtime_config_with_overrides(self, tmp_path): + """Test loading config with CLI overrides.""" + config_data = {"defaultProvider": "openai"} + config_file = tmp_path / "server_config.json" + config_file.write_text(json.dumps(config_data)) + + overrides = ConfigOverride(provider="anthropic", model="claude-3") + config = load_runtime_config(str(config_file), overrides) + + # Overrides should take precedence + assert config.provider == "anthropic" + assert config.model == "claude-3" + + def test_load_runtime_config_nonexistent_file(self, tmp_path, monkeypatch): + """Test loading config when file doesn't exist returns defaults.""" + monkeypatch.chdir(tmp_path) + # No file created - should use defaults + + config = load_runtime_config() + assert isinstance(config, RuntimeConfig) + + +class TestLoadRuntimeConfigAsync: + """Test load_runtime_config_async function.""" + + @pytest.mark.asyncio + async def test_load_runtime_config_async_default_path(self, tmp_path, monkeypatch): + """Test async loading config with default path.""" + config_data = {"default_provider": "anthropic", "default_model": "claude-3"} + config_file = tmp_path / "server_config.json" + config_file.write_text(json.dumps(config_data)) + + monkeypatch.chdir(tmp_path) + + config = await load_runtime_config_async() + assert isinstance(config, RuntimeConfig) + assert config.provider == "anthropic" + + @pytest.mark.asyncio + async def test_load_runtime_config_async_custom_path(self, tmp_path): + """Test async loading config from custom path.""" + config_data = {"default_provider": "openai", "default_model": "gpt-4-turbo"} + config_file = tmp_path / "custom_async_config.json" + config_file.write_text(json.dumps(config_data)) + + config = await load_runtime_config_async(str(config_file)) + assert isinstance(config, RuntimeConfig) + assert config.provider == "openai" + assert config.model == "gpt-4-turbo" + + @pytest.mark.asyncio + async def 
test_load_runtime_config_async_with_overrides(self, tmp_path): + """Test async loading config with CLI overrides.""" + config_data = {"defaultProvider": "openai"} + config_file = tmp_path / "server_config.json" + config_file.write_text(json.dumps(config_data)) + + overrides = ConfigOverride(provider="ollama", model="llama2") + config = await load_runtime_config_async(str(config_file), overrides) + + assert config.provider == "ollama" + assert config.model == "llama2" + + @pytest.mark.asyncio + async def test_load_runtime_config_async_nonexistent_file( + self, tmp_path, monkeypatch + ): + """Test async loading config when file doesn't exist.""" + monkeypatch.chdir(tmp_path) + + config = await load_runtime_config_async() + assert isinstance(config, RuntimeConfig) diff --git a/tests/config/test_config_manager.py b/tests/config/test_config_manager.py index fbb7d32b..0636dec0 100644 --- a/tests/config/test_config_manager.py +++ b/tests/config/test_config_manager.py @@ -6,6 +6,7 @@ import json import pytest +from unittest.mock import patch, MagicMock from mcp_cli.config.config_manager import ( ServerConfig, @@ -134,7 +135,7 @@ def test_mcp_config_defaults(self): config = MCPConfig() assert config.servers == {} assert config.default_provider == "openai" - assert config.default_model == "gpt-4" + assert config.default_model == "gpt-4o-mini" assert config.theme == "default" assert config.verbose is True assert config.confirm_tools is True @@ -683,3 +684,363 @@ def test_validate_multiple_errors(self, tmp_path): assert is_valid is False assert len(errors) == 2 + + +class TestMCPConfigLoadFromFileTimeouts: + """Test loading timeouts from config file.""" + + def test_load_with_timeouts(self, tmp_path): + """Test loading config with timeout configuration.""" + # LegacyMCPConfig parses timeouts section and creates TimeoutConfig + config_data = { + "mcpServers": {}, + "timeouts": { + "streamingChunkTimeout": 60.0, + "streamingGlobalTimeout": 600.0, + "streamingFirstChunkTimeout": 
90.0, + "toolExecutionTimeout": 180.0, + "serverInitTimeout": 60.0, + "httpRequestTimeout": 45.0, + "httpConnectTimeout": 15.0, + }, + } + config_file = tmp_path / "config.json" + config_file.write_text(json.dumps(config_data)) + + config = MCPConfig.load_from_file(config_file) + + # Verify timeouts object exists and has positive values + assert config.timeouts is not None + assert config.timeouts.streaming_chunk > 0 + assert config.timeouts.streaming_global > 0 + + def test_load_with_tools_config(self, tmp_path): + """Test loading config with tool configuration.""" + config_data = { + "mcpServers": {}, + "tools": { + "includeTools": ["tool1", "tool2"], + "excludeTools": ["bad_tool"], + "dynamicToolsEnabled": False, + "confirmTools": True, + "maxConcurrency": 5, + }, + } + config_file = tmp_path / "config.json" + config_file.write_text(json.dumps(config_data)) + + config = MCPConfig.load_from_file(config_file) + + assert config.tools.include_tools == ["tool1", "tool2"] + assert config.tools.exclude_tools == ["bad_tool"] + assert config.tools.dynamic_tools_enabled is False + assert config.tools.confirm_tools is True + assert config.tools.max_concurrency == 5 + + def test_load_with_vault_non_defaults(self, tmp_path): + """Test saving config with non-default vault settings.""" + config = MCPConfig() + config.vault_mount_point = "kv" + config.vault_path_prefix = "custom/path" + config.vault_namespace = "prod" + + config_file = tmp_path / "vault_config.json" + config.save_to_file(config_file) + + with open(config_file) as f: + data = json.load(f) + + assert "tokenStorage" in data + assert data["tokenStorage"]["vaultMountPoint"] == "kv" + assert data["tokenStorage"]["vaultPathPrefix"] == "custom/path" + assert data["tokenStorage"]["vaultNamespace"] == "prod" + + def test_load_with_error_returns_empty_config(self, tmp_path): + """Test that loading invalid config returns default config.""" + config_file = tmp_path / "invalid.json" + config_file.write_text("{ invalid json 
}") + + config = MCPConfig.load_from_file(config_file) + # Should return default config rather than crashing + assert config is not None + assert config.servers == {} + + +class TestConfigManagerPackageFallback: + """Test ConfigManager package fallback behavior.""" + + def test_initialize_without_path_no_cwd_file(self, tmp_path, monkeypatch): + """Test initialize without path when no server_config.json in cwd.""" + monkeypatch.chdir(tmp_path) + # No server_config.json in tmp_path + + manager = ConfigManager() + manager.reset() + + # Mock the importlib.resources behavior + with patch("importlib.resources.files") as mock_files: + mock_package = MagicMock() + mock_config_file = MagicMock() + mock_config_file.is_file.return_value = False + mock_package.__truediv__ = MagicMock(return_value=mock_config_file) + mock_files.return_value = mock_package + + config = manager.initialize() + assert config is not None + + def test_initialize_with_cwd_file_priority(self, tmp_path, monkeypatch): + """Test that cwd config takes priority over bundled.""" + config_data = {"mcpServers": {"local-server": {"command": "local-cmd"}}} + config_file = tmp_path / "server_config.json" + config_file.write_text(json.dumps(config_data)) + + monkeypatch.chdir(tmp_path) + + manager = ConfigManager() + manager.reset() + + config = manager.initialize() + assert "local-server" in config.servers + + +class TestRuntimeConfigOld: + """Test legacy RuntimeConfig in config_manager.py.""" + + def test_runtime_config_creation(self): + """Test creating RuntimeConfig.""" + from mcp_cli.config.config_manager import RuntimeConfig as LegacyRuntimeConfig + + mcp_config = MCPConfig() + runtime = LegacyRuntimeConfig(mcp_config) + + assert runtime.mcp_config == mcp_config + assert runtime.cli_overrides == {} + + def test_runtime_config_with_cli_overrides(self): + """Test RuntimeConfig with CLI overrides.""" + from mcp_cli.config.config_manager import RuntimeConfig as LegacyRuntimeConfig + + mcp_config = MCPConfig() + 
cli_overrides = {"streaming_chunk_timeout": 60.0} + runtime = LegacyRuntimeConfig(mcp_config, cli_overrides) + + assert runtime.cli_overrides == cli_overrides + + def test_get_timeout_from_cli(self): + """Test getting timeout from CLI overrides.""" + from mcp_cli.config.config_manager import RuntimeConfig as LegacyRuntimeConfig + + mcp_config = MCPConfig() + cli_overrides = {"streaming_chunk_timeout": 75.0} + runtime = LegacyRuntimeConfig(mcp_config, cli_overrides) + + timeout = runtime.get_timeout("streaming_chunk") + assert timeout == 75.0 + + def test_get_timeout_from_env(self, monkeypatch): + """Test getting timeout from environment variable.""" + from mcp_cli.config.config_manager import RuntimeConfig as LegacyRuntimeConfig + + monkeypatch.setenv("MCP_STREAMING_CHUNK_TIMEOUT", "88.0") + + mcp_config = MCPConfig() + runtime = LegacyRuntimeConfig(mcp_config) + + timeout = runtime.get_timeout("streaming_chunk") + assert timeout == 88.0 + + def test_get_timeout_from_tool_timeout_env(self, monkeypatch): + """Test MCP_TOOL_TIMEOUT applies to multiple timeouts.""" + from mcp_cli.config.config_manager import RuntimeConfig as LegacyRuntimeConfig + + monkeypatch.setenv("MCP_TOOL_TIMEOUT", "150.0") + + mcp_config = MCPConfig() + runtime = LegacyRuntimeConfig(mcp_config) + + assert runtime.get_timeout("streaming_chunk") == 150.0 + assert runtime.get_timeout("streaming_global") == 150.0 + assert runtime.get_timeout("tool_execution") == 150.0 + + def test_get_timeout_invalid_env_value(self, monkeypatch): + """Test invalid env value falls back to config.""" + from mcp_cli.config.config_manager import RuntimeConfig as LegacyRuntimeConfig + + monkeypatch.setenv("MCP_STREAMING_CHUNK_TIMEOUT", "not_a_number") + + mcp_config = MCPConfig() + runtime = LegacyRuntimeConfig(mcp_config) + + # Should not crash, should get config value + timeout = runtime.get_timeout("streaming_chunk") + assert timeout > 0 + + def test_get_timeout_fallback(self): + """Test timeout fallback when not 
found anywhere.""" + from mcp_cli.config.config_manager import RuntimeConfig as LegacyRuntimeConfig + + mcp_config = MCPConfig() + runtime = LegacyRuntimeConfig(mcp_config) + + # Non-existent timeout should fall back to 120.0 + timeout = runtime.get_timeout("nonexistent_timeout") + assert timeout == 120.0 + + def test_get_tool_config_value_from_cli(self): + """Test getting tool config from CLI overrides.""" + from mcp_cli.config.config_manager import RuntimeConfig as LegacyRuntimeConfig + + mcp_config = MCPConfig() + cli_overrides = {"include_tools": ["tool1", "tool2"]} + runtime = LegacyRuntimeConfig(mcp_config, cli_overrides) + + tools = runtime.get_tool_config_value("include_tools") + assert tools == ["tool1", "tool2"] + + def test_get_tool_config_value_from_env_list(self, monkeypatch): + """Test getting tool list from environment.""" + from mcp_cli.config.config_manager import RuntimeConfig as LegacyRuntimeConfig + + monkeypatch.setenv("MCP_CLI_INCLUDE_TOOLS", "tool_a,tool_b,tool_c") + + mcp_config = MCPConfig() + runtime = LegacyRuntimeConfig(mcp_config) + + tools = runtime.get_tool_config_value("include_tools") + assert tools == ["tool_a", "tool_b", "tool_c"] + + def test_get_tool_config_value_dynamic_tools_enabled(self, monkeypatch): + """Test getting dynamic_tools_enabled from env.""" + from mcp_cli.config.config_manager import RuntimeConfig as LegacyRuntimeConfig + + monkeypatch.setenv("MCP_CLI_DYNAMIC_TOOLS_ENABLED", "true") + + mcp_config = MCPConfig() + runtime = LegacyRuntimeConfig(mcp_config) + + enabled = runtime.get_tool_config_value("dynamic_tools_enabled") + assert enabled is True + + def test_get_tool_config_value_confirm_tools_disabled(self, monkeypatch): + """Test getting confirm_tools disabled from env.""" + from mcp_cli.config.config_manager import RuntimeConfig as LegacyRuntimeConfig + + monkeypatch.setenv("MCP_CLI_CONFIRM_TOOLS", "false") + + mcp_config = MCPConfig() + runtime = LegacyRuntimeConfig(mcp_config) + + confirm = 
runtime.get_tool_config_value("confirm_tools") + assert confirm is False + + def test_get_tool_config_value_max_concurrency(self, monkeypatch): + """Test getting max_concurrency from env.""" + from mcp_cli.config.config_manager import RuntimeConfig as LegacyRuntimeConfig + + monkeypatch.setenv("MCP_CLI_MAX_CONCURRENCY", "10") + + mcp_config = MCPConfig() + runtime = LegacyRuntimeConfig(mcp_config) + + concurrency = runtime.get_tool_config_value("max_concurrency") + assert concurrency == 10 + + def test_get_tool_config_value_invalid_max_concurrency(self, monkeypatch): + """Test invalid max_concurrency falls back to config.""" + from mcp_cli.config.config_manager import RuntimeConfig as LegacyRuntimeConfig + + monkeypatch.setenv("MCP_CLI_MAX_CONCURRENCY", "not_a_number") + + mcp_config = MCPConfig() + runtime = LegacyRuntimeConfig(mcp_config) + + concurrency = runtime.get_tool_config_value("max_concurrency") + # Should get from config, not crash + assert concurrency is not None + + def test_get_all_timeouts(self): + """Test getting all timeouts.""" + from mcp_cli.config.config_manager import RuntimeConfig as LegacyRuntimeConfig + + mcp_config = MCPConfig() + runtime = LegacyRuntimeConfig(mcp_config) + + all_timeouts = runtime.get_all_timeouts() + assert "streaming_chunk" in all_timeouts + assert "streaming_global" in all_timeouts + assert "tool_execution" in all_timeouts + assert "server_init" in all_timeouts + + def test_update_from_cli(self): + """Test update_from_cli method.""" + from mcp_cli.config.config_manager import RuntimeConfig as LegacyRuntimeConfig + + mcp_config = MCPConfig() + runtime = LegacyRuntimeConfig(mcp_config) + + runtime.update_from_cli(streaming_chunk_timeout=99.0, custom_key="value") + + assert runtime.cli_overrides["streaming_chunk_timeout"] == 99.0 + assert runtime.cli_overrides["custom_key"] == "value" + + +class TestGetRuntimeConfig: + """Test get_runtime_config function.""" + + def test_get_runtime_config_with_config(self): + """Test 
get_runtime_config with provided config.""" + from mcp_cli.config.config_manager import ( + get_runtime_config, + RuntimeConfig as LegacyRuntimeConfig, + ) + + mcp_config = MCPConfig() + runtime = get_runtime_config(mcp_config) + + assert isinstance(runtime, LegacyRuntimeConfig) + + def test_get_runtime_config_without_config(self, tmp_path, monkeypatch): + """Test get_runtime_config without config uses ConfigManager.""" + from mcp_cli.config.config_manager import ( + get_runtime_config, + RuntimeConfig as LegacyRuntimeConfig, + ) + + # Reset ConfigManager + manager = ConfigManager() + manager.reset() + + # This will create default config since ConfigManager not initialized + runtime = get_runtime_config() + + assert isinstance(runtime, LegacyRuntimeConfig) + + def test_get_runtime_config_with_cli_overrides(self): + """Test get_runtime_config with CLI overrides.""" + from mcp_cli.config.config_manager import get_runtime_config + + mcp_config = MCPConfig() + cli_overrides = {"timeout": 60.0} + runtime = get_runtime_config(mcp_config, cli_overrides) + + assert runtime.cli_overrides == cli_overrides + + +class TestServerConfigOAuth: + """Test ServerConfig OAuth handling.""" + + def test_from_dict_with_oauth_dict(self): + """Test ServerConfig.from_dict when oauth is provided as dict.""" + data = { + "command": "python", + "oauth": { + "client_id": "test", + "authorization_url": "https://auth.example.com/authorize", + "token_url": "https://auth.example.com/token", + }, + } + + config = ServerConfig.from_dict("test", data) + assert config.command == "python" + assert config.oauth is not None + assert config.oauth.client_id == "test" diff --git a/tests/config/test_config_models.py b/tests/config/test_config_models.py new file mode 100644 index 00000000..9ca95417 --- /dev/null +++ b/tests/config/test_config_models.py @@ -0,0 +1,317 @@ +# tests/config/test_config_models.py +"""Tests for config/models.py module.""" + +import json +import pytest +from pydantic import ValidationError
+ +from mcp_cli.config.models import ( + TimeoutConfig, + ToolConfig, + VaultConfig, + TokenStorageConfig, + MCPConfig, + ConfigOverride, +) +from mcp_cli.config.enums import TimeoutType, TokenBackend + + +class TestTimeoutConfig: + """Test TimeoutConfig model.""" + + def test_default_values(self): + """Test TimeoutConfig default values.""" + config = TimeoutConfig() + assert config.streaming_chunk > 0 + assert config.streaming_global > 0 + assert config.streaming_first_chunk > 0 + assert config.tool_execution > 0 + assert config.server_init > 0 + assert config.http_request > 0 + assert config.http_connect > 0 + + def test_custom_values(self): + """Test TimeoutConfig with custom values.""" + config = TimeoutConfig( + streaming_chunk=60.0, + streaming_global=600.0, + tool_execution=120.0, + ) + assert config.streaming_chunk == 60.0 + assert config.streaming_global == 600.0 + assert config.tool_execution == 120.0 + + def test_get_timeout_by_enum(self): + """Test getting timeout by TimeoutType enum.""" + config = TimeoutConfig(streaming_chunk=45.0, tool_execution=90.0) + + assert config.get(TimeoutType.STREAMING_CHUNK) == 45.0 + assert config.get(TimeoutType.TOOL_EXECUTION) == 90.0 + + @pytest.mark.asyncio + async def test_get_async(self): + """Test async getter.""" + config = TimeoutConfig(streaming_chunk=55.0) + result = await config.get_async(TimeoutType.STREAMING_CHUNK) + assert result == 55.0 + + def test_immutable(self): + """Test TimeoutConfig is immutable.""" + config = TimeoutConfig() + with pytest.raises(ValidationError): + config.streaming_chunk = 100.0 + + def test_validation_positive_values(self): + """Test validation requires positive values.""" + with pytest.raises(ValidationError): + TimeoutConfig(streaming_chunk=-1.0) + + with pytest.raises(ValidationError): + TimeoutConfig(streaming_chunk=0) + + +class TestToolConfig: + """Test ToolConfig model.""" + + def test_default_values(self): + """Test ToolConfig default values.""" + config = ToolConfig() + 
assert config.include_tools is None + assert config.exclude_tools is None + assert config.dynamic_tools_enabled is not None # Has default + assert config.confirm_tools is not None # Has default + assert config.max_concurrency > 0 + + def test_custom_values(self): + """Test ToolConfig with custom values.""" + config = ToolConfig( + include_tools=["tool1", "tool2"], + exclude_tools=["bad_tool"], + dynamic_tools_enabled=True, + confirm_tools=False, + max_concurrency=10, + ) + assert config.include_tools == ["tool1", "tool2"] + assert config.exclude_tools == ["bad_tool"] + assert config.dynamic_tools_enabled is True + assert config.confirm_tools is False + assert config.max_concurrency == 10 + + def test_empty_list_becomes_none(self): + """Test that empty tool lists become None.""" + config = ToolConfig(include_tools=[], exclude_tools=[]) + assert config.include_tools is None + assert config.exclude_tools is None + + def test_max_concurrency_validation(self): + """Test max_concurrency validation.""" + # Must be positive + with pytest.raises(ValidationError): + ToolConfig(max_concurrency=0) + + # Must be <= 100 + with pytest.raises(ValidationError): + ToolConfig(max_concurrency=101) + + +class TestVaultConfig: + """Test VaultConfig model.""" + + def test_default_values(self): + """Test VaultConfig default values.""" + config = VaultConfig() + assert config.url is None + assert config.token is None + assert config.mount_point == "secret" + assert config.path_prefix == "mcp-cli/oauth" + assert config.namespace is None + + def test_custom_values(self): + """Test VaultConfig with custom values.""" + config = VaultConfig( + url="https://vault.example.com", + token="vault-token-123", + mount_point="kv", + path_prefix="app/tokens", + namespace="production", + ) + assert config.url == "https://vault.example.com" + assert config.token == "vault-token-123" + assert config.mount_point == "kv" + assert config.path_prefix == "app/tokens" + assert config.namespace == "production" + + 
+class TestTokenStorageConfig: + """Test TokenStorageConfig model.""" + + def test_default_values(self): + """Test TokenStorageConfig default values.""" + config = TokenStorageConfig() + assert config.backend == TokenBackend.AUTO + assert config.password is None + assert config.vault is not None + + def test_with_vault_config(self): + """Test TokenStorageConfig with vault config.""" + vault = VaultConfig(url="https://vault.test.com") + config = TokenStorageConfig( + backend=TokenBackend.VAULT, + vault=vault, + ) + assert config.backend == TokenBackend.VAULT + assert config.vault.url == "https://vault.test.com" + + def test_with_password(self): + """Test TokenStorageConfig with password.""" + config = TokenStorageConfig( + backend=TokenBackend.ENCRYPTED, + password="secret123", + ) + assert config.backend == TokenBackend.ENCRYPTED + assert config.password == "secret123" + + +class TestMCPConfig: + """Test MCPConfig model.""" + + def test_default_values(self): + """Test MCPConfig default values.""" + config = MCPConfig() + assert config.default_provider is not None + assert config.default_model is not None + assert config.theme is not None + assert config.verbose is not None + assert isinstance(config.timeouts, TimeoutConfig) + assert isinstance(config.tools, ToolConfig) + assert isinstance(config.token_storage, TokenStorageConfig) + assert config.servers == {} + + def test_custom_values(self): + """Test MCPConfig with custom values.""" + config = MCPConfig( + default_provider="anthropic", + default_model="claude-3", + theme="dark", + verbose=False, + ) + assert config.default_provider == "anthropic" + assert config.default_model == "claude-3" + assert config.theme == "dark" + assert config.verbose is False + + def test_load_sync_nonexistent(self, tmp_path): + """Test load_sync with nonexistent file returns defaults.""" + config = MCPConfig.load_sync(tmp_path / "nonexistent.json") + assert config.default_provider is not None + + def test_load_sync_valid_file(self, 
tmp_path): + """Test load_sync with valid file.""" + config_data = { + "defaultProvider": "openai", + "defaultModel": "gpt-4-turbo", + "theme": "light", + } + config_file = tmp_path / "config.json" + config_file.write_text(json.dumps(config_data)) + + config = MCPConfig.load_sync(config_file) + # Note: MCPConfig uses snake_case internally but JSON uses camelCase + # The load might not map camelCase to snake_case automatically + assert config is not None + + def test_load_from_file_alias(self, tmp_path): + """Test load_from_file alias for backward compatibility.""" + config_file = tmp_path / "config.json" + config_file.write_text(json.dumps({})) + + config = MCPConfig.load_from_file(config_file) + assert config is not None + + @pytest.mark.asyncio + async def test_load_async_nonexistent(self, tmp_path): + """Test load_async with nonexistent file returns defaults.""" + config = await MCPConfig.load_async(tmp_path / "nonexistent.json") + assert config.default_provider is not None + + @pytest.mark.asyncio + async def test_load_async_valid_file(self, tmp_path): + """Test load_async with valid file.""" + config_data = {"defaultProvider": "anthropic"} + config_file = tmp_path / "config.json" + config_file.write_text(json.dumps(config_data)) + + config = await MCPConfig.load_async(config_file) + assert config is not None + + def test_servers_alias(self, tmp_path): + """Test that mcpServers alias works.""" + config_data = {"mcpServers": {"test-server": {"command": "python"}}} + config_file = tmp_path / "config.json" + config_file.write_text(json.dumps(config_data)) + + config = MCPConfig.load_sync(config_file) + assert "test-server" in config.servers + + +class TestConfigOverride: + """Test ConfigOverride model.""" + + def test_default_values(self): + """Test ConfigOverride default values.""" + override = ConfigOverride() + assert override.timeouts == {} + assert override.tools == {} + assert override.provider is None + assert override.model is None + assert override.theme 
is None + + def test_custom_values(self): + """Test ConfigOverride with custom values.""" + override = ConfigOverride( + provider="anthropic", + model="claude-3-opus", + theme="dark", + ) + assert override.provider == "anthropic" + assert override.model == "claude-3-opus" + assert override.theme == "dark" + + def test_set_timeout(self): + """Test set_timeout method.""" + override = ConfigOverride() + override.set_timeout(TimeoutType.STREAMING_CHUNK, 60.0) + + assert TimeoutType.STREAMING_CHUNK in override.timeouts + assert override.timeouts[TimeoutType.STREAMING_CHUNK] == 60.0 + + def test_set_timeout_validation(self): + """Test set_timeout validates positive values.""" + override = ConfigOverride() + + with pytest.raises(ValueError) as exc_info: + override.set_timeout(TimeoutType.STREAMING_CHUNK, -1.0) + assert "positive" in str(exc_info.value).lower() + + with pytest.raises(ValueError): + override.set_timeout(TimeoutType.STREAMING_CHUNK, 0) + + def test_apply_tool_timeout_to_all(self): + """Test apply_tool_timeout_to_all method.""" + override = ConfigOverride() + override.apply_tool_timeout_to_all(90.0) + + assert override.timeouts[TimeoutType.STREAMING_CHUNK] == 90.0 + assert override.timeouts[TimeoutType.STREAMING_GLOBAL] == 90.0 + assert override.timeouts[TimeoutType.TOOL_EXECUTION] == 90.0 + + def test_mutable(self): + """Test ConfigOverride is mutable.""" + override = ConfigOverride() + override.provider = "test" + override.model = "test-model" + override.tools["key"] = "value" + + assert override.provider == "test" + assert override.model == "test-model" + assert override.tools["key"] == "value" diff --git a/tests/config/test_config_server_models.py b/tests/config/test_config_server_models.py new file mode 100644 index 00000000..78f1cac3 --- /dev/null +++ b/tests/config/test_config_server_models.py @@ -0,0 +1,395 @@ +# tests/config/test_config_server_models.py
"""Tests for config/server_models.py module.""" + +import pytest +from pydantic import ValidationError + 
+from mcp_cli.config.server_models import ( + HTTPServerConfig, + STDIOServerConfig, + OAuthConfig, + UnifiedServerConfig, + ServerConfigInput, +) + + +class TestHTTPServerConfig: + """Test HTTPServerConfig model.""" + + def test_valid_http_config(self): + """Test creating valid HTTP server config.""" + config = HTTPServerConfig( + name="test-http", + url="http://localhost:8080", + ) + assert config.name == "test-http" + assert config.url == "http://localhost:8080" + assert config.headers == {} + assert config.disabled is False + + def test_http_config_with_https(self): + """Test HTTP config with HTTPS URL.""" + config = HTTPServerConfig( + name="secure-server", + url="https://api.example.com", + ) + assert config.url == "https://api.example.com" + + def test_http_config_with_headers(self): + """Test HTTP config with headers.""" + config = HTTPServerConfig( + name="with-headers", + url="http://localhost:8080", + headers={"Authorization": "Bearer token123"}, + ) + assert config.headers["Authorization"] == "Bearer token123" + + def test_http_config_disabled(self): + """Test HTTP config disabled flag.""" + config = HTTPServerConfig( + name="disabled-server", + url="http://localhost:8080", + disabled=True, + ) + assert config.disabled is True + + def test_http_config_invalid_url(self): + """Test HTTP config with invalid URL.""" + with pytest.raises(ValidationError) as exc_info: + HTTPServerConfig( + name="bad-url", + url="ftp://localhost:8080", # Not http/https + ) + assert "http://" in str(exc_info.value) or "URL" in str(exc_info.value) + + def test_http_config_immutable(self): + """Test HTTP config is immutable.""" + config = HTTPServerConfig(name="test", url="http://localhost:8080") + with pytest.raises(ValidationError): + config.name = "new-name" + + +class TestSTDIOServerConfig: + """Test STDIOServerConfig model.""" + + def test_valid_stdio_config(self): + """Test creating valid STDIO server config.""" + config = STDIOServerConfig( + name="stdio-server", + 
command="python", + ) + assert config.name == "stdio-server" + assert config.command == "python" + assert config.args == [] + assert config.env == {} + assert config.disabled is False + + def test_stdio_config_with_args(self): + """Test STDIO config with arguments.""" + config = STDIOServerConfig( + name="python-server", + command="python", + args=["-m", "server"], + ) + assert config.args == ["-m", "server"] + + def test_stdio_config_with_env(self): + """Test STDIO config with environment variables.""" + config = STDIOServerConfig( + name="env-server", + command="node", + env={"NODE_ENV": "production"}, + ) + assert config.env["NODE_ENV"] == "production" + + def test_stdio_config_empty_command(self): + """Test STDIO config with empty command.""" + with pytest.raises(ValidationError) as exc_info: + STDIOServerConfig( + name="bad-server", + command=" ", # Empty/whitespace only + ) + assert "empty" in str(exc_info.value).lower() + + def test_stdio_config_command_stripped(self): + """Test STDIO config command is stripped.""" + config = STDIOServerConfig( + name="test", + command=" python ", + ) + assert config.command == "python" + + +class TestOAuthConfig: + """Test OAuthConfig model.""" + + def test_valid_oauth_config(self): + """Test creating valid OAuth config.""" + config = OAuthConfig( + client_id="test-client", + authorization_url="https://auth.example.com/authorize", + token_url="https://auth.example.com/token", + ) + assert config.client_id == "test-client" + assert config.client_secret is None + assert config.scopes == [] + assert config.redirect_uri == "http://localhost:8080/callback" + + def test_oauth_config_with_secret(self): + """Test OAuth config with client secret.""" + config = OAuthConfig( + client_id="test-client", + client_secret="secret123", + authorization_url="https://auth.example.com/authorize", + token_url="https://auth.example.com/token", + ) + assert config.client_secret == "secret123" + + def test_oauth_config_with_scopes(self): + """Test 
OAuth config with scopes.""" + config = OAuthConfig( + client_id="test-client", + authorization_url="https://auth.example.com/authorize", + token_url="https://auth.example.com/token", + scopes=["read", "write", "admin"], + ) + assert config.scopes == ["read", "write", "admin"] + + def test_oauth_config_custom_redirect_uri(self): + """Test OAuth config with custom redirect URI.""" + config = OAuthConfig( + client_id="test-client", + authorization_url="https://auth.example.com/authorize", + token_url="https://auth.example.com/token", + redirect_uri="http://localhost:9000/oauth/callback", + ) + assert config.redirect_uri == "http://localhost:9000/oauth/callback" + + +class TestUnifiedServerConfig: + """Test UnifiedServerConfig model.""" + + def test_http_server_config(self): + """Test creating HTTP server via UnifiedServerConfig.""" + config = UnifiedServerConfig( + name="http-server", + url="http://localhost:8080", + ) + assert config.name == "http-server" + assert config.url == "http://localhost:8080" + assert config.is_http is True + assert config.is_stdio is False + + def test_stdio_server_config(self): + """Test creating STDIO server via UnifiedServerConfig.""" + config = UnifiedServerConfig( + name="stdio-server", + command="python", + args=["-m", "server"], + ) + assert config.name == "stdio-server" + assert config.command == "python" + assert config.is_stdio is True + assert config.is_http is False + + def test_empty_name_validation(self): + """Test that empty name is rejected.""" + with pytest.raises(ValidationError) as exc_info: + UnifiedServerConfig( + name=" ", # Empty/whitespace + command="python", + ) + assert "empty" in str(exc_info.value).lower() + + def test_url_format_validation(self): + """Test URL format validation.""" + with pytest.raises(ValidationError): + UnifiedServerConfig( + name="bad-url-server", + url="ftp://localhost:8080", + ) + + def test_empty_command_validation(self): + """Test empty command validation.""" + with 
pytest.raises(ValidationError): + UnifiedServerConfig( + name="empty-cmd-server", + command=" ", + ) + + def test_neither_url_nor_command_error(self): + """Test error when neither url nor command provided.""" + with pytest.raises(ValidationError) as exc_info: + UnifiedServerConfig(name="bad-server") + assert ( + "url" in str(exc_info.value).lower() + or "command" in str(exc_info.value).lower() + ) + + def test_both_url_and_command_error(self): + """Test error when both url and command provided.""" + with pytest.raises(ValidationError) as exc_info: + UnifiedServerConfig( + name="both-server", + url="http://localhost:8080", + command="python", + ) + assert "both" in str(exc_info.value).lower() + + def test_to_http_config(self): + """Test converting to HTTPServerConfig.""" + unified = UnifiedServerConfig( + name="http-test", + url="http://localhost:8080", + headers={"Auth": "token"}, + disabled=True, + ) + http_config = unified.to_http_config() + + assert isinstance(http_config, HTTPServerConfig) + assert http_config.name == "http-test" + assert http_config.url == "http://localhost:8080" + assert http_config.headers["Auth"] == "token" + assert http_config.disabled is True + + def test_to_http_config_error_for_stdio(self): + """Test to_http_config raises error for STDIO server.""" + unified = UnifiedServerConfig( + name="stdio-test", + command="python", + ) + with pytest.raises(ValueError) as exc_info: + unified.to_http_config() + assert "not an HTTP server" in str(exc_info.value) + + def test_to_stdio_config(self): + """Test converting to STDIOServerConfig.""" + unified = UnifiedServerConfig( + name="stdio-test", + command="python", + args=["-m", "server"], + env={"KEY": "value"}, + disabled=True, + ) + stdio_config = unified.to_stdio_config() + + assert isinstance(stdio_config, STDIOServerConfig) + assert stdio_config.name == "stdio-test" + assert stdio_config.command == "python" + assert stdio_config.args == ["-m", "server"] + assert stdio_config.env["KEY"] == 
"value" + assert stdio_config.disabled is True + + def test_to_stdio_config_error_for_http(self): + """Test to_stdio_config raises error for HTTP server.""" + unified = UnifiedServerConfig( + name="http-test", + url="http://localhost:8080", + ) + with pytest.raises(ValueError) as exc_info: + unified.to_stdio_config() + assert "not a STDIO server" in str(exc_info.value) + + def test_unified_config_with_oauth(self): + """Test UnifiedServerConfig with OAuth.""" + oauth = OAuthConfig( + client_id="test", + authorization_url="https://auth.example.com/authorize", + token_url="https://auth.example.com/token", + ) + config = UnifiedServerConfig( + name="oauth-server", + url="http://localhost:8080", + oauth=oauth, + ) + assert config.oauth is not None + assert config.oauth.client_id == "test" + + +class TestServerConfigInput: + """Test ServerConfigInput model.""" + + def test_basic_stdio_input(self): + """Test basic STDIO server input.""" + input_config = ServerConfigInput( + command="python", + args=["-m", "server"], + ) + unified = input_config.to_unified("test-server") + + assert unified.name == "test-server" + assert unified.command == "python" + assert unified.args == ["-m", "server"] + + def test_basic_http_input(self): + """Test basic HTTP server input.""" + input_config = ServerConfigInput( + url="http://localhost:8080", + ) + unified = input_config.to_unified("http-server") + + assert unified.name == "http-server" + assert unified.url == "http://localhost:8080" + + def test_input_with_dict_oauth(self): + """Test input with OAuth as dict.""" + input_config = ServerConfigInput( + url="http://localhost:8080", + oauth={ + "client_id": "test-client", + "authorization_url": "https://auth.example.com/authorize", + "token_url": "https://auth.example.com/token", + }, + ) + unified = input_config.to_unified("oauth-server") + + assert unified.oauth is not None + assert unified.oauth.client_id == "test-client" + + def test_input_with_oauth_object(self): + """Test input with 
OAuth as OAuthConfig object.""" + oauth = OAuthConfig( + client_id="test-client", + authorization_url="https://auth.example.com/authorize", + token_url="https://auth.example.com/token", + ) + input_config = ServerConfigInput( + url="http://localhost:8080", + oauth=oauth, + ) + unified = input_config.to_unified("oauth-server") + + assert unified.oauth is not None + assert unified.oauth.client_id == "test-client" + + def test_input_with_env_and_headers(self): + """Test input with env vars and headers.""" + input_config = ServerConfigInput( + command="python", + env={"KEY": "value"}, + ) + unified = input_config.to_unified("env-server") + + assert unified.env["KEY"] == "value" + + def test_input_ignores_extra_fields(self): + """Test that extra fields are ignored.""" + # This tests the extra="ignore" config + input_config = ServerConfigInput.model_validate( + { + "command": "python", + "extra_field": "ignored", + "another_extra": 123, + } + ) + unified = input_config.to_unified("test") + assert unified.command == "python" + + def test_input_disabled_flag(self): + """Test disabled flag in input.""" + input_config = ServerConfigInput( + command="python", + disabled=True, + ) + unified = input_config.to_unified("disabled-server") + assert unified.disabled is True diff --git a/tests/config/test_discovery.py b/tests/config/test_discovery.py index c71f8c99..6831d080 100644 --- a/tests/config/test_discovery.py +++ b/tests/config/test_discovery.py @@ -7,6 +7,8 @@ import os from unittest.mock import patch, MagicMock +import pytest + from mcp_cli.config.discovery import ( setup_chuk_llm_environment, trigger_discovery_after_setup, @@ -14,9 +16,23 @@ validate_provider_exists, get_discovery_status, force_discovery_refresh, + get_discovery_manager, ) +@pytest.fixture(autouse=True) +def reset_discovery_manager(): + """Reset the DiscoveryManager singleton state before each test.""" + manager = get_discovery_manager() + # Reset internal state + manager._env_setup_complete = False + 
manager._discovery_triggered = False + yield + # Reset again after test + manager._env_setup_complete = False + manager._discovery_triggered = False + + class TestSetupChukLlmEnvironment: """Test setup_chuk_llm_environment function.""" @@ -36,11 +52,6 @@ def test_setup_environment_first_time(self, monkeypatch): for var in env_vars_to_clear: monkeypatch.delenv(var, raising=False) - # Reset the module state - import mcp_cli.config.discovery as discovery_module - - discovery_module._ENV_SETUP_COMPLETE = False - setup_chuk_llm_environment() assert os.environ["CHUK_LLM_DISCOVERY_ENABLED"] == "true" @@ -56,9 +67,9 @@ def test_setup_environment_already_complete(self, monkeypatch): """Test that setup doesn't overwrite when already complete.""" monkeypatch.setenv("CHUK_LLM_DISCOVERY_ENABLED", "false") # User value - import mcp_cli.config.discovery as discovery_module - - discovery_module._ENV_SETUP_COMPLETE = True + # Mark as already complete + manager = get_discovery_manager() + manager._env_setup_complete = True setup_chuk_llm_environment() @@ -69,10 +80,6 @@ def test_setup_preserves_existing_env_vars(self, monkeypatch): """Test that setup preserves user-set environment variables.""" monkeypatch.setenv("CHUK_LLM_DISCOVERY_TIMEOUT", "30") # User override - import mcp_cli.config.discovery as discovery_module - - discovery_module._ENV_SETUP_COMPLETE = False - setup_chuk_llm_environment() # Should preserve user's override @@ -82,18 +89,13 @@ def test_setup_preserves_existing_env_vars(self, monkeypatch): class TestTriggerDiscoveryAfterSetup: """Test trigger_discovery_after_setup function.""" - def test_trigger_discovery_success(self, monkeypatch): + def test_trigger_discovery_success(self): """Test successful discovery trigger.""" - # Reset discovery state - import mcp_cli.config.discovery as discovery_module - - discovery_module._DISCOVERY_TRIGGERED = False - # Mock the discovery function mock_discovery = MagicMock(return_value=["func1", "func2", "func3"]) with patch( - 
"chuk_llm.api.providers.trigger_ollama_discovery_and_refresh", + "chuk_llm.api.providers.refresh_provider_functions", mock_discovery, ): count = trigger_discovery_after_setup() @@ -103,9 +105,9 @@ def test_trigger_discovery_success(self, monkeypatch): def test_trigger_discovery_already_triggered(self): """Test that discovery doesn't run twice.""" - import mcp_cli.config.discovery as discovery_module - - discovery_module._DISCOVERY_TRIGGERED = True + # Mark as already triggered + manager = get_discovery_manager() + manager._discovery_triggered = True count = trigger_discovery_after_setup() @@ -113,14 +115,10 @@ def test_trigger_discovery_already_triggered(self): def test_trigger_discovery_no_new_functions(self): """Test discovery with no new functions.""" - import mcp_cli.config.discovery as discovery_module - - discovery_module._DISCOVERY_TRIGGERED = False - mock_discovery = MagicMock(return_value=[]) with patch( - "chuk_llm.api.providers.trigger_ollama_discovery_and_refresh", + "chuk_llm.api.providers.refresh_provider_functions", mock_discovery, ): count = trigger_discovery_after_setup() @@ -129,14 +127,10 @@ def test_trigger_discovery_no_new_functions(self): def test_trigger_discovery_exception(self): """Test discovery with exception.""" - import mcp_cli.config.discovery as discovery_module - - discovery_module._DISCOVERY_TRIGGERED = False - mock_discovery = MagicMock(side_effect=Exception("Discovery failed")) with patch( - "chuk_llm.api.providers.trigger_ollama_discovery_and_refresh", + "chuk_llm.api.providers.refresh_provider_functions", mock_discovery, ): count = trigger_discovery_after_setup() @@ -231,10 +225,10 @@ class TestGetDiscoveryStatus: def test_get_discovery_status_complete(self, monkeypatch): """Test getting status when discovery is complete.""" - import mcp_cli.config.discovery as discovery_module - - discovery_module._ENV_SETUP_COMPLETE = True - discovery_module._DISCOVERY_TRIGGERED = True + # Set manager state + manager = get_discovery_manager() 
+ manager._env_setup_complete = True + manager._discovery_triggered = True monkeypatch.setenv("CHUK_LLM_DISCOVERY_ENABLED", "true") monkeypatch.setenv("CHUK_LLM_OLLAMA_DISCOVERY", "true") @@ -246,19 +240,14 @@ def test_get_discovery_status_complete(self, monkeypatch): assert status["env_setup_complete"] is True assert status["discovery_triggered"] is True - assert status["discovery_enabled"] == "true" - assert status["ollama_discovery"] == "true" - assert status["auto_discover"] == "true" - assert status["tool_compatibility"] == "true" - assert status["universal_tools"] == "true" + assert status["discovery_enabled"] is True + assert status["ollama_discovery"] is True + assert status["auto_discover"] is True + assert status["tool_compatibility"] is True + assert status["universal_tools"] is True def test_get_discovery_status_incomplete(self, monkeypatch): """Test getting status when discovery is not complete.""" - import mcp_cli.config.discovery as discovery_module - - discovery_module._ENV_SETUP_COMPLETE = False - discovery_module._DISCOVERY_TRIGGERED = False - # Clear env vars for key in ["CHUK_LLM_DISCOVERY_ENABLED", "CHUK_LLM_OLLAMA_DISCOVERY"]: monkeypatch.delenv(key, raising=False) @@ -267,8 +256,8 @@ def test_get_discovery_status_incomplete(self, monkeypatch): assert status["env_setup_complete"] is False assert status["discovery_triggered"] is False - assert status["discovery_enabled"] == "false" # Default - assert status["ollama_discovery"] == "false" # Default + assert status["discovery_enabled"] is False # Default + assert status["ollama_discovery"] is False # Default class TestForceDiscoveryRefresh: @@ -276,14 +265,14 @@ class TestForceDiscoveryRefresh: def test_force_refresh_success(self): """Test forcing discovery refresh.""" - import mcp_cli.config.discovery as discovery_module - - discovery_module._DISCOVERY_TRIGGERED = True # Already triggered + # Mark as already triggered + manager = get_discovery_manager() + manager._discovery_triggered = True 
mock_discovery = MagicMock(return_value=["func1", "func2"]) with patch( - "chuk_llm.api.providers.trigger_ollama_discovery_and_refresh", + "chuk_llm.api.providers.refresh_provider_functions", mock_discovery, ): count = force_discovery_refresh() @@ -293,14 +282,14 @@ def test_force_refresh_success(self): def test_force_refresh_resets_flag(self): """Test that force refresh resets the triggered flag.""" - import mcp_cli.config.discovery as discovery_module - - discovery_module._DISCOVERY_TRIGGERED = True + # Mark as already triggered + manager = get_discovery_manager() + manager._discovery_triggered = True mock_discovery = MagicMock(return_value=["func1"]) with patch( - "chuk_llm.api.providers.trigger_ollama_discovery_and_refresh", + "chuk_llm.api.providers.refresh_provider_functions", mock_discovery, ): count = force_discovery_refresh() diff --git a/tests/config/test_env_vars.py b/tests/config/test_env_vars.py new file mode 100644 index 00000000..27422f72 --- /dev/null +++ b/tests/config/test_env_vars.py @@ -0,0 +1,260 @@ +# tests/config/test_env_vars.py +"""Tests for environment variable helpers.""" + +from __future__ import annotations + +import os + +import pytest + +from mcp_cli.config.env_vars import ( + EnvVar, + get_env, + get_env_bool, + get_env_float, + get_env_int, + get_env_list, + is_set, + set_env, + unset_env, +) + + +class TestEnvVar: + """Tests for EnvVar enum.""" + + def test_env_var_values(self) -> None: + """Test that EnvVar members have expected string values.""" + assert EnvVar.TOOL_TIMEOUT.value == "MCP_TOOL_TIMEOUT" + assert EnvVar.LLM_PROVIDER.value == "LLM_PROVIDER" + assert EnvVar.PATH.value == "PATH" + + def test_env_var_is_string_enum(self) -> None: + """Test that EnvVar values can be used as strings.""" + # The .value attribute gives the string value + assert EnvVar.TOOL_TIMEOUT.value == "MCP_TOOL_TIMEOUT" + # EnvVar inherits from str, so it can be used in string contexts + assert f"{EnvVar.TOOL_TIMEOUT.value}" == "MCP_TOOL_TIMEOUT" + + 
+class TestGetEnv: + """Tests for get_env function.""" + + def test_get_env_returns_value(self, monkeypatch: pytest.MonkeyPatch) -> None: + """Test get_env returns environment variable value.""" + monkeypatch.setenv("MCP_TOOL_TIMEOUT", "300") + assert get_env(EnvVar.TOOL_TIMEOUT) == "300" + + def test_get_env_returns_default_when_not_set( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test get_env returns default when var not set.""" + monkeypatch.delenv("MCP_TOOL_TIMEOUT", raising=False) + assert get_env(EnvVar.TOOL_TIMEOUT, "120") == "120" + + def test_get_env_returns_none_when_not_set_no_default( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test get_env returns None when var not set and no default.""" + monkeypatch.delenv("MCP_TOOL_TIMEOUT", raising=False) + assert get_env(EnvVar.TOOL_TIMEOUT) is None + + +class TestSetEnv: + """Tests for set_env function.""" + + def test_set_env_sets_value(self, monkeypatch: pytest.MonkeyPatch) -> None: + """Test set_env sets environment variable.""" + monkeypatch.delenv("MCP_TOOL_TIMEOUT", raising=False) + set_env(EnvVar.TOOL_TIMEOUT, "600") + assert os.environ["MCP_TOOL_TIMEOUT"] == "600" + + def test_set_env_overwrites_existing(self, monkeypatch: pytest.MonkeyPatch) -> None: + """Test set_env overwrites existing value.""" + monkeypatch.setenv("MCP_TOOL_TIMEOUT", "100") + set_env(EnvVar.TOOL_TIMEOUT, "200") + assert os.environ["MCP_TOOL_TIMEOUT"] == "200" + + +class TestUnsetEnv: + """Tests for unset_env function.""" + + def test_unset_env_removes_variable(self, monkeypatch: pytest.MonkeyPatch) -> None: + """Test unset_env removes environment variable.""" + monkeypatch.setenv("MCP_TOOL_TIMEOUT", "300") + unset_env(EnvVar.TOOL_TIMEOUT) + assert "MCP_TOOL_TIMEOUT" not in os.environ + + def test_unset_env_no_error_when_not_set( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test unset_env doesn't error when var not set.""" + monkeypatch.delenv("MCP_TOOL_TIMEOUT", raising=False) + # Should not 
raise + unset_env(EnvVar.TOOL_TIMEOUT) + assert "MCP_TOOL_TIMEOUT" not in os.environ + + +class TestIsSet: + """Tests for is_set function.""" + + def test_is_set_returns_true_when_set( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test is_set returns True when variable is set.""" + monkeypatch.setenv("MCP_TOOL_TIMEOUT", "300") + assert is_set(EnvVar.TOOL_TIMEOUT) is True + + def test_is_set_returns_true_for_empty_string( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test is_set returns True even for empty string value.""" + monkeypatch.setenv("MCP_TOOL_TIMEOUT", "") + assert is_set(EnvVar.TOOL_TIMEOUT) is True + + def test_is_set_returns_false_when_not_set( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test is_set returns False when variable is not set.""" + monkeypatch.delenv("MCP_TOOL_TIMEOUT", raising=False) + assert is_set(EnvVar.TOOL_TIMEOUT) is False + + +class TestGetEnvInt: + """Tests for get_env_int function.""" + + def test_get_env_int_returns_int(self, monkeypatch: pytest.MonkeyPatch) -> None: + """Test get_env_int returns integer value.""" + monkeypatch.setenv("MCP_TOOL_TIMEOUT", "300") + assert get_env_int(EnvVar.TOOL_TIMEOUT) == 300 + + def test_get_env_int_returns_default_when_not_set( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test get_env_int returns default when var not set.""" + monkeypatch.delenv("MCP_TOOL_TIMEOUT", raising=False) + assert get_env_int(EnvVar.TOOL_TIMEOUT, 120) == 120 + + def test_get_env_int_returns_default_for_invalid_value( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test get_env_int returns default for invalid integer.""" + monkeypatch.setenv("MCP_TOOL_TIMEOUT", "not_a_number") + assert get_env_int(EnvVar.TOOL_TIMEOUT, 120) == 120 + + def test_get_env_int_returns_none_for_invalid_no_default( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test get_env_int returns None for invalid value with no default.""" + monkeypatch.setenv("MCP_TOOL_TIMEOUT", 
"invalid") + assert get_env_int(EnvVar.TOOL_TIMEOUT) is None + + +class TestGetEnvFloat: + """Tests for get_env_float function.""" + + def test_get_env_float_returns_float(self, monkeypatch: pytest.MonkeyPatch) -> None: + """Test get_env_float returns float value.""" + monkeypatch.setenv("MCP_TOOL_TIMEOUT", "120.5") + assert get_env_float(EnvVar.TOOL_TIMEOUT) == 120.5 + + def test_get_env_float_returns_default_when_not_set( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test get_env_float returns default when var not set.""" + monkeypatch.delenv("MCP_TOOL_TIMEOUT", raising=False) + assert get_env_float(EnvVar.TOOL_TIMEOUT, 60.0) == 60.0 + + def test_get_env_float_returns_default_for_invalid_value( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test get_env_float returns default for invalid float.""" + monkeypatch.setenv("MCP_TOOL_TIMEOUT", "not_a_float") + assert get_env_float(EnvVar.TOOL_TIMEOUT, 60.0) == 60.0 + + def test_get_env_float_returns_none_for_invalid_no_default( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test get_env_float returns None for invalid value with no default.""" + monkeypatch.setenv("MCP_TOOL_TIMEOUT", "invalid") + assert get_env_float(EnvVar.TOOL_TIMEOUT) is None + + +class TestGetEnvBool: + """Tests for get_env_bool function.""" + + @pytest.mark.parametrize("value", ["1", "true", "True", "TRUE", "yes", "YES", "on"]) + def test_get_env_bool_truthy_values( + self, monkeypatch: pytest.MonkeyPatch, value: str + ) -> None: + """Test get_env_bool returns True for truthy values.""" + monkeypatch.setenv("MCP_CLI_DYNAMIC_TOOLS", value) + assert get_env_bool(EnvVar.CLI_DYNAMIC_TOOLS) is True + + @pytest.mark.parametrize("value", ["0", "false", "False", "no", "off", ""]) + def test_get_env_bool_falsy_values( + self, monkeypatch: pytest.MonkeyPatch, value: str + ) -> None: + """Test get_env_bool returns False for falsy values.""" + monkeypatch.setenv("MCP_CLI_DYNAMIC_TOOLS", value) + assert 
get_env_bool(EnvVar.CLI_DYNAMIC_TOOLS) is False + + def test_get_env_bool_returns_default_when_not_set( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test get_env_bool returns default when var not set.""" + monkeypatch.delenv("MCP_CLI_DYNAMIC_TOOLS", raising=False) + assert get_env_bool(EnvVar.CLI_DYNAMIC_TOOLS, True) is True + assert get_env_bool(EnvVar.CLI_DYNAMIC_TOOLS, False) is False + + +class TestGetEnvList: + """Tests for get_env_list function.""" + + def test_get_env_list_splits_on_comma( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test get_env_list splits on comma by default.""" + monkeypatch.setenv("MCP_CLI_INCLUDE_TOOLS", "tool1,tool2,tool3") + result = get_env_list(EnvVar.CLI_INCLUDE_TOOLS) + assert result == ["tool1", "tool2", "tool3"] + + def test_get_env_list_strips_whitespace( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test get_env_list strips whitespace from items.""" + monkeypatch.setenv("MCP_CLI_INCLUDE_TOOLS", "tool1 , tool2 , tool3 ") + result = get_env_list(EnvVar.CLI_INCLUDE_TOOLS) + assert result == ["tool1", "tool2", "tool3"] + + def test_get_env_list_custom_separator( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test get_env_list with custom separator.""" + monkeypatch.setenv("MCP_CLI_INCLUDE_TOOLS", "tool1:tool2:tool3") + result = get_env_list(EnvVar.CLI_INCLUDE_TOOLS, separator=":") + assert result == ["tool1", "tool2", "tool3"] + + def test_get_env_list_returns_default_when_not_set( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test get_env_list returns default when var not set.""" + monkeypatch.delenv("MCP_CLI_INCLUDE_TOOLS", raising=False) + assert get_env_list(EnvVar.CLI_INCLUDE_TOOLS, default=["default"]) == [ + "default" + ] + + def test_get_env_list_returns_empty_list_when_not_set_no_default( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test get_env_list returns empty list when var not set and no default.""" + 
monkeypatch.delenv("MCP_CLI_INCLUDE_TOOLS", raising=False) + assert get_env_list(EnvVar.CLI_INCLUDE_TOOLS) == [] + + def test_get_env_list_filters_empty_items( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test get_env_list filters out empty items.""" + monkeypatch.setenv("MCP_CLI_INCLUDE_TOOLS", "tool1,,tool2, ,tool3") + result = get_env_list(EnvVar.CLI_INCLUDE_TOOLS) + assert result == ["tool1", "tool2", "tool3"] diff --git a/tests/config/test_logging.py b/tests/config/test_logging.py new file mode 100644 index 00000000..bea0aae2 --- /dev/null +++ b/tests/config/test_logging.py @@ -0,0 +1,171 @@ +# tests/config/test_logging.py +"""Tests for logging configuration.""" + +from __future__ import annotations + +import logging +import os + +import pytest + +from mcp_cli.config.logging import ( + configure_mcp_server_logging, + get_logger, + setup_clean_logging, + setup_logging, + setup_quiet_logging, + setup_silent_mcp_environment, + setup_verbose_logging, +) + + +class TestSetupLogging: + """Tests for setup_logging function.""" + + def test_setup_logging_default(self) -> None: + """Test setup_logging with default parameters.""" + setup_logging() + root = logging.getLogger() + assert root.level == logging.WARNING + + def test_setup_logging_quiet_mode(self) -> None: + """Test setup_logging with quiet=True.""" + setup_logging(quiet=True) + root = logging.getLogger() + assert root.level == logging.ERROR + + def test_setup_logging_verbose_mode(self) -> None: + """Test setup_logging with verbose=True.""" + setup_logging(verbose=True) + root = logging.getLogger() + assert root.level == logging.DEBUG + + def test_setup_logging_invalid_level(self) -> None: + """Test setup_logging with invalid level raises ValueError.""" + with pytest.raises(ValueError, match="Invalid log level"): + setup_logging(level="INVALID_LEVEL") + + def test_setup_logging_json_format(self) -> None: + """Test setup_logging with JSON format.""" + setup_logging(format_style="json") + root = 
logging.getLogger() + assert len(root.handlers) > 0 + # Check formatter contains JSON structure + handler = root.handlers[0] + assert handler.formatter is not None + assert "timestamp" in handler.formatter._fmt + + def test_setup_logging_detailed_format(self) -> None: + """Test setup_logging with detailed format.""" + setup_logging(format_style="detailed") + root = logging.getLogger() + assert len(root.handlers) > 0 + handler = root.handlers[0] + assert handler.formatter is not None + assert "%(asctime)s" in handler.formatter._fmt + + def test_setup_logging_simple_format(self) -> None: + """Test setup_logging with simple format (default).""" + setup_logging(format_style="simple") + root = logging.getLogger() + assert len(root.handlers) > 0 + handler = root.handlers[0] + assert handler.formatter is not None + assert "%(levelname)" in handler.formatter._fmt + + def test_setup_logging_sets_chuk_env_var( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test that setup_logging sets CHUK_LOG_LEVEL env var.""" + monkeypatch.delenv("CHUK_LOG_LEVEL", raising=False) + setup_logging(level="INFO") + assert os.environ.get("CHUK_LOG_LEVEL") == "INFO" + + +class TestGetLogger: + """Tests for get_logger function.""" + + def test_get_logger_returns_logger(self) -> None: + """Test get_logger returns a logger with mcp_cli prefix.""" + logger = get_logger("test_module") + assert logger.name == "mcp_cli.test_module" + + def test_get_logger_different_names(self) -> None: + """Test get_logger returns different loggers for different names.""" + logger1 = get_logger("module1") + logger2 = get_logger("module2") + assert logger1.name != logger2.name + + +class TestConvenienceFunctions: + """Tests for convenience logging functions.""" + + def test_setup_quiet_logging(self) -> None: + """Test setup_quiet_logging sets ERROR level.""" + setup_quiet_logging() + root = logging.getLogger() + assert root.level == logging.ERROR + + def test_setup_verbose_logging(self) -> None: + """Test 
setup_verbose_logging sets DEBUG level with detailed format.""" + setup_verbose_logging() + root = logging.getLogger() + assert root.level == logging.DEBUG + + def test_setup_clean_logging(self) -> None: + """Test setup_clean_logging sets WARNING level.""" + setup_clean_logging() + root = logging.getLogger() + assert root.level == logging.WARNING + + +class TestConfigureMCPServerLogging: + """Tests for configure_mcp_server_logging function.""" + + def test_configure_mcp_server_logging_suppress(self) -> None: + """Test configure_mcp_server_logging with suppress=True.""" + configure_mcp_server_logging(suppress=True) + # Check that framework loggers are set to CRITICAL + logger = logging.getLogger("chuk_mcp_runtime") + assert logger.level == logging.CRITICAL + assert logger.propagate is False + + def test_configure_mcp_server_logging_no_suppress(self) -> None: + """Test configure_mcp_server_logging with suppress=False.""" + configure_mcp_server_logging(suppress=False) + # Check that framework loggers are set to INFO + logger = logging.getLogger("chuk_mcp_runtime") + assert logger.level == logging.INFO + + +class TestSetupSilentMCPEnvironment: + """Tests for setup_silent_mcp_environment function.""" + + def test_setup_silent_mcp_environment_sets_env_vars( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test that setup_silent_mcp_environment sets required env vars.""" + # Clear any existing env vars + for var in ["LOG_LEVEL", "VERBOSE", "DEBUG", "QUIET"]: + monkeypatch.delenv(var, raising=False) + + setup_silent_mcp_environment() + + # Check environment variables are set + assert os.environ.get("LOG_LEVEL") == "ERROR" + assert os.environ.get("VERBOSE") == "0" + assert os.environ.get("DEBUG") == "0" + assert os.environ.get("QUIET") == "1" + + def test_setup_silent_mcp_environment_creates_startup_script( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Test that setup_silent_mcp_environment creates PYTHONSTARTUP script.""" + 
monkeypatch.delenv("PYTHONSTARTUP", raising=False) + + setup_silent_mcp_environment() + + # Check PYTHONSTARTUP is set and file exists + startup_path = os.environ.get("PYTHONSTARTUP") + assert startup_path is not None + assert os.path.exists(startup_path) diff --git a/tests/config/test_runtime.py b/tests/config/test_runtime.py new file mode 100644 index 00000000..98ccdd90 --- /dev/null +++ b/tests/config/test_runtime.py @@ -0,0 +1,283 @@ +# tests/config/test_runtime.py +"""Tests for config/runtime.py module.""" + +import json +import pytest + +from mcp_cli.config.runtime import RuntimeConfig, ResolvedValue +from mcp_cli.config.models import MCPConfig, ConfigOverride, TimeoutConfig, ToolConfig +from mcp_cli.config.enums import ConfigSource, TimeoutType + + +class TestResolvedValue: + """Test ResolvedValue model.""" + + def test_resolved_value_creation(self): + """Test creating ResolvedValue.""" + rv = ResolvedValue(value=45.0, source=ConfigSource.CLI) + assert rv.value == 45.0 + assert rv.source == ConfigSource.CLI + + def test_resolved_value_immutable(self): + """Test ResolvedValue is immutable.""" + rv = ResolvedValue(value=30.0, source=ConfigSource.ENV) + with pytest.raises(Exception): # frozen=True raises ValidationError + rv.value = 60.0 + + +class TestRuntimeConfig: + """Test RuntimeConfig class.""" + + @pytest.fixture + def base_config(self, tmp_path): + """Create a base MCPConfig.""" + config_data = { + "defaultProvider": "openai", + "defaultModel": "gpt-4", + "timeouts": { + "streaming_chunk": 45.0, + "streaming_global": 300.0, + }, + } + config_file = tmp_path / "config.json" + config_file.write_text(json.dumps(config_data)) + return MCPConfig.load_sync(config_file) + + def test_runtime_config_creation(self, base_config): + """Test creating RuntimeConfig.""" + rc = RuntimeConfig(base_config) + assert rc._file_config == base_config + assert rc._cli_overrides is not None + + def test_runtime_config_with_overrides(self, base_config): + """Test RuntimeConfig 
with CLI overrides.""" + overrides = ConfigOverride(provider="anthropic", model="claude-3") + rc = RuntimeConfig(base_config, overrides) + assert rc._cli_overrides == overrides + + def test_get_timeout_from_file_config(self, base_config): + """Test getting timeout from file config.""" + rc = RuntimeConfig(base_config) + timeout = rc.get_timeout(TimeoutType.STREAMING_CHUNK) + # Should get default from TimeoutConfig since our config doesn't have all fields + assert timeout > 0 + + def test_get_timeout_from_cli_override(self, base_config): + """Test getting timeout from CLI override.""" + overrides = ConfigOverride() + overrides.set_timeout(TimeoutType.STREAMING_CHUNK, 60.0) + + rc = RuntimeConfig(base_config, overrides) + timeout = rc.get_timeout(TimeoutType.STREAMING_CHUNK) + assert timeout == 60.0 + + def test_get_timeout_caching(self, base_config): + """Test that timeout values are cached.""" + rc = RuntimeConfig(base_config) + + # First call + timeout1 = rc.get_timeout(TimeoutType.STREAMING_CHUNK) + # Second call should return cached value + timeout2 = rc.get_timeout(TimeoutType.STREAMING_CHUNK) + + assert timeout1 == timeout2 + assert TimeoutType.STREAMING_CHUNK in rc._timeout_cache + + def test_get_timeout_from_env(self, base_config, monkeypatch): + """Test getting timeout from environment variable.""" + monkeypatch.setenv("MCP_STREAMING_CHUNK_TIMEOUT", "99.0") + + rc = RuntimeConfig(base_config) + timeout = rc.get_timeout(TimeoutType.STREAMING_CHUNK) + assert timeout == 99.0 + + def test_get_timeout_from_tool_timeout_env(self, base_config, monkeypatch): + """Test getting timeout from MCP_TOOL_TIMEOUT env var.""" + monkeypatch.setenv("MCP_TOOL_TIMEOUT", "150.0") + + rc = RuntimeConfig(base_config) + timeout = rc.get_timeout(TimeoutType.TOOL_EXECUTION) + assert timeout == 150.0 + + @pytest.mark.asyncio + async def test_get_timeout_async(self, base_config): + """Test async timeout getter.""" + rc = RuntimeConfig(base_config) + timeout = await 
rc.get_timeout_async(TimeoutType.STREAMING_CHUNK) + assert timeout > 0 + + def test_get_all_timeouts(self, base_config): + """Test getting all timeouts.""" + rc = RuntimeConfig(base_config) + timeouts = rc.get_all_timeouts() + + assert isinstance(timeouts, TimeoutConfig) + assert timeouts.streaming_chunk > 0 + assert timeouts.streaming_global > 0 + assert timeouts.tool_execution > 0 + + @pytest.mark.asyncio + async def test_get_all_timeouts_async(self, base_config): + """Test async getting all timeouts.""" + rc = RuntimeConfig(base_config) + timeouts = await rc.get_all_timeouts_async() + assert isinstance(timeouts, TimeoutConfig) + + def test_get_tool_config(self, base_config): + """Test getting tool configuration.""" + rc = RuntimeConfig(base_config) + tool_config = rc.get_tool_config() + + assert isinstance(tool_config, ToolConfig) + assert tool_config.max_concurrency > 0 + + def test_get_tool_config_with_cli_overrides(self, base_config): + """Test getting tool config with CLI overrides.""" + overrides = ConfigOverride() + overrides.tools["include_tools"] = ["tool1", "tool2"] + overrides.tools["confirm_tools"] = True + + rc = RuntimeConfig(base_config, overrides) + tool_config = rc.get_tool_config() + + assert tool_config.include_tools == ["tool1", "tool2"] + assert tool_config.confirm_tools is True + + def test_get_tool_config_with_env_vars(self, base_config, monkeypatch): + """Test getting tool config from environment variables.""" + monkeypatch.setenv("MCP_CLI_INCLUDE_TOOLS", "tool_a,tool_b") + monkeypatch.setenv("MCP_CLI_DYNAMIC_TOOLS", "true") + + rc = RuntimeConfig(base_config) + tool_config = rc.get_tool_config() + + assert tool_config.include_tools == ["tool_a", "tool_b"] + assert tool_config.dynamic_tools_enabled is True + + @pytest.mark.asyncio + async def test_get_tool_config_async(self, base_config): + """Test async tool config getter.""" + rc = RuntimeConfig(base_config) + tool_config = await rc.get_tool_config_async() + assert 
isinstance(tool_config, ToolConfig) + + def test_provider_property(self, base_config): + """Test provider property resolution.""" + rc = RuntimeConfig(base_config) + assert rc.provider == "openai" + + def test_provider_property_cli_override(self, base_config): + """Test provider from CLI override.""" + overrides = ConfigOverride(provider="anthropic") + rc = RuntimeConfig(base_config, overrides) + assert rc.provider == "anthropic" + + def test_provider_property_env_override(self, base_config, monkeypatch): + """Test provider from environment variable.""" + monkeypatch.setenv("MCP_PROVIDER", "ollama") + rc = RuntimeConfig(base_config) + assert rc.provider == "ollama" + + def test_model_property(self, base_config): + """Test model property resolution.""" + rc = RuntimeConfig(base_config) + # Model comes from config or defaults - just verify it exists + assert rc.model is not None + + def test_model_property_cli_override(self, base_config): + """Test model from CLI override.""" + overrides = ConfigOverride(model="claude-3-opus") + rc = RuntimeConfig(base_config, overrides) + assert rc.model == "claude-3-opus" + + def test_model_property_env_override(self, base_config, monkeypatch): + """Test model from environment variable.""" + monkeypatch.setenv("MCP_MODEL", "llama2") + rc = RuntimeConfig(base_config) + assert rc.model == "llama2" + + def test_debug_report(self, base_config): + """Test debug report generation.""" + rc = RuntimeConfig(base_config) + report = rc.debug_report() + + assert "timeouts" in report + assert "provider" in report + assert "model" in report + assert "tools" in report + + # Check timeout structure + for tt in TimeoutType: + assert tt.value in report["timeouts"] + assert "value" in report["timeouts"][tt.value] + assert "source" in report["timeouts"][tt.value] + + def test_get_tool_list_include(self, base_config, monkeypatch): + """Test _get_tool_list for include_tools.""" + monkeypatch.setenv("MCP_CLI_INCLUDE_TOOLS", "tool1,tool2,tool3") + + rc = 
RuntimeConfig(base_config) + tools = rc._get_tool_list("include_tools") + assert tools == ["tool1", "tool2", "tool3"] + + def test_get_tool_list_exclude(self, base_config, monkeypatch): + """Test _get_tool_list for exclude_tools.""" + monkeypatch.setenv("MCP_CLI_EXCLUDE_TOOLS", "bad_tool") + + rc = RuntimeConfig(base_config) + tools = rc._get_tool_list("exclude_tools") + assert tools == ["bad_tool"] + + def test_get_tool_list_from_cli(self, base_config): + """Test _get_tool_list from CLI overrides.""" + overrides = ConfigOverride() + overrides.tools["include_tools"] = ["cli_tool"] + + rc = RuntimeConfig(base_config, overrides) + tools = rc._get_tool_list("include_tools") + assert tools == ["cli_tool"] + + def test_get_tool_bool_from_env(self, base_config, monkeypatch): + """Test _get_tool_bool from environment.""" + monkeypatch.setenv("MCP_CLI_DYNAMIC_TOOLS", "true") + + rc = RuntimeConfig(base_config) + result = rc._get_tool_bool("dynamic_tools_enabled") + assert result is True + + def test_get_tool_bool_from_cli(self, base_config): + """Test _get_tool_bool from CLI overrides.""" + overrides = ConfigOverride() + overrides.tools["confirm_tools"] = True + + rc = RuntimeConfig(base_config, overrides) + result = rc._get_tool_bool("confirm_tools") + assert result is True + + def test_get_tool_int_from_cli(self, base_config): + """Test _get_tool_int from CLI overrides.""" + overrides = ConfigOverride() + overrides.tools["max_concurrency"] = 5 + + rc = RuntimeConfig(base_config, overrides) + result = rc._get_tool_int("max_concurrency") + assert result == 5 + + def test_get_tool_int_invalid_value(self, base_config): + """Test _get_tool_int with invalid value.""" + overrides = ConfigOverride() + overrides.tools["max_concurrency"] = "not_a_number" + + rc = RuntimeConfig(base_config, overrides) + result = rc._get_tool_int("max_concurrency") + assert result is None + + def test_resolve_timeout_all_types(self, base_config): + """Test resolving all timeout types.""" + rc = 
RuntimeConfig(base_config) + + for timeout_type in TimeoutType: + resolved = rc._resolve_timeout(timeout_type) + assert isinstance(resolved, ResolvedValue) + assert resolved.value > 0 + assert resolved.source in ConfigSource diff --git a/tests/display/__init__.py b/tests/display/__init__.py new file mode 100644 index 00000000..7f8f1edd --- /dev/null +++ b/tests/display/__init__.py @@ -0,0 +1 @@ +"""Tests for display system.""" diff --git a/tests/display/test_color_converter.py b/tests/display/test_color_converter.py new file mode 100644 index 00000000..ea89025e --- /dev/null +++ b/tests/display/test_color_converter.py @@ -0,0 +1,201 @@ +"""Tests for display color converter utilities.""" + +from mcp_cli.display import rich_to_prompt_toolkit, create_transparent_completion_style + + +class TestRichToPromptToolkit: + """Test rich_to_prompt_toolkit color conversion.""" + + def test_none_color(self): + """Test None color returns default.""" + assert rich_to_prompt_toolkit(None) == "ansibrightblack" + + def test_empty_color(self): + """Test empty string returns default.""" + assert rich_to_prompt_toolkit("") == "ansibrightblack" + + def test_dim_color(self): + """Test dim color.""" + assert rich_to_prompt_toolkit("dim") == "ansibrightblack" + + def test_basic_colors(self): + """Test basic color conversions.""" + assert rich_to_prompt_toolkit("white") == "ansiwhite" + assert rich_to_prompt_toolkit("black") == "ansiblack" + assert rich_to_prompt_toolkit("red") == "ansired" + assert rich_to_prompt_toolkit("green") == "ansigreen" + assert rich_to_prompt_toolkit("yellow") == "ansiyellow" + assert rich_to_prompt_toolkit("blue") == "ansiblue" + assert rich_to_prompt_toolkit("magenta") == "ansimagenta" + assert rich_to_prompt_toolkit("cyan") == "ansicyan" + + def test_bright_colors(self): + """Test bright color conversions with bold modifier.""" + assert rich_to_prompt_toolkit("bright_white") == "ansiwhite bold" + assert rich_to_prompt_toolkit("bright_black") == "ansibrightblack" 
+ assert rich_to_prompt_toolkit("bright_red") == "ansired bold" + assert rich_to_prompt_toolkit("bright_green") == "ansigreen bold" + assert rich_to_prompt_toolkit("bright_yellow") == "ansiyellow bold" + assert rich_to_prompt_toolkit("bright_blue") == "ansiblue bold" + assert rich_to_prompt_toolkit("bright_magenta") == "ansimagenta bold" + assert rich_to_prompt_toolkit("bright_cyan") == "ansicyan bold" + + def test_dark_colors(self): + """Test dark color conversions.""" + assert rich_to_prompt_toolkit("dark_red") == "ansired" + assert rich_to_prompt_toolkit("dark_green") == "ansigreen" + assert rich_to_prompt_toolkit("dark_yellow") == "ansiyellow" + assert rich_to_prompt_toolkit("dark_blue") == "ansiblue" + assert rich_to_prompt_toolkit("dark_magenta") == "ansimagenta" + assert rich_to_prompt_toolkit("dark_cyan") == "ansicyan" + assert rich_to_prompt_toolkit("dark_goldenrod") == "ansiyellow" + + def test_special_colors(self): + """Test special color names.""" + assert rich_to_prompt_toolkit("grey50") == "ansibrightblack" + assert rich_to_prompt_toolkit("gray50") == "ansibrightblack" + assert rich_to_prompt_toolkit("default") == "ansiwhite" + + def test_modifiers(self): + """Test style modifiers.""" + assert rich_to_prompt_toolkit("bold") == "bold" + assert rich_to_prompt_toolkit("underline") == "underline" + assert rich_to_prompt_toolkit("italic") == "italic" + + def test_composite_styles(self): + """Test composite styles like 'bold yellow'.""" + result = rich_to_prompt_toolkit("bold yellow") + assert "bold" in result + assert "ansiyellow" in result + + def test_composite_with_bright(self): + """Test composite with bright colors.""" + result = rich_to_prompt_toolkit("bold bright_red") + assert "bold" in result + assert "ansired" in result + + def test_composite_avoids_duplicates(self): + """Test that composite styles process all parts.""" + result = rich_to_prompt_toolkit("bold bold yellow") + # The function processes each part, so duplicates may appear + # Just 
verify the expected components are there + assert "bold" in result + assert "ansiyellow" in result + + def test_unknown_color(self): + """Test unknown color returns default.""" + assert rich_to_prompt_toolkit("unknown_color_xyz") == "ansiwhite" + + def test_composite_with_unknown(self): + """Test composite with unknown color.""" + result = rich_to_prompt_toolkit("bold unknown_color") + # Should extract the last part and fall back if unknown + assert "ansiwhite" in result or result == "ansiwhite" + + +class MockColorScheme: + """Mock color scheme for testing.""" + + def __init__( + self, + accent="bright_white", + highlight="bright_yellow", + dim="grey50", + normal="white", + ): + self.accent = accent + self.highlight = highlight + self.dim = dim + self.normal = normal + + +class TestCreateTransparentCompletionStyle: + """Test create_transparent_completion_style function.""" + + def test_default_black_background(self): + """Test default black background style.""" + colors = MockColorScheme() + style = create_transparent_completion_style(colors) + + assert "completion-menu" in style + assert style["completion-menu"] == "bg:ansiblack" + + def test_white_background(self): + """Test white background style.""" + colors = MockColorScheme() + style = create_transparent_completion_style(colors, background_color="white") + + assert style["completion-menu"] == "bg:ansiwhite" + + def test_no_background(self): + """Test no background (empty string).""" + colors = MockColorScheme() + style = create_transparent_completion_style(colors, background_color="") + + assert style["completion-menu"] == "" + + def test_accent_color_applied(self): + """Test accent color is applied to completions.""" + colors = MockColorScheme(accent="bright_cyan") + style = create_transparent_completion_style(colors) + + # Verify accent color is converted and applied + completion = style["completion-menu.completion"] + assert "ansicyan" in completion or "bold" in completion + + def 
test_highlight_color_applied(self): + """Test highlight color is applied to current completion.""" + colors = MockColorScheme(highlight="bright_green") + style = create_transparent_completion_style(colors) + + completion_current = style["completion-menu.completion.current"] + assert "ansigreen bold" in completion_current or "bold" in completion_current + assert "underline" in completion_current + + def test_dim_color_applied(self): + """Test dim color is applied to meta text.""" + colors = MockColorScheme(dim="grey50") + style = create_transparent_completion_style(colors) + + assert "ansibrightblack" in style["completion-menu.meta"] + + def test_all_required_keys_present(self): + """Test all required style keys are present.""" + colors = MockColorScheme() + style = create_transparent_completion_style(colors) + + required_keys = [ + "completion-menu", + "completion-menu.completion", + "completion-menu.completion.current", + "completion-menu.meta", + "completion-menu.meta.current", + "completion-menu.multi-column-meta", + "completion-menu.border", + "scrollbar.background", + "scrollbar.button", + "auto-suggestion", + ] + + for key in required_keys: + assert key in style, f"Missing required key: {key}" + + def test_background_applied_to_all_elements(self): + """Test background is applied to all menu elements.""" + colors = MockColorScheme() + style = create_transparent_completion_style(colors, background_color="blue") + + # Check that background is in various elements + assert "bg:ansiblue" in style["completion-menu"] + assert "bg:ansiblue" in style["completion-menu.completion"] + assert "bg:ansiblue" in style["completion-menu.meta"] + assert "bg:ansiblue" in style["scrollbar.background"] + + def test_auto_suggestion_no_background(self): + """Test auto-suggestion doesn't have background (only foreground).""" + colors = MockColorScheme(dim="grey50") + style = create_transparent_completion_style(colors, background_color="black") + + # Auto-suggestion should only have 
color, not background + assert "bg:" not in style["auto-suggestion"] + assert "ansibrightblack" in style["auto-suggestion"] diff --git a/tests/display/test_formatters.py b/tests/display/test_formatters.py new file mode 100644 index 00000000..d0ae7b42 --- /dev/null +++ b/tests/display/test_formatters.py @@ -0,0 +1,304 @@ +"""Tests for display formatters module.""" + +from mcp_cli.display.formatters import ( + format_args_preview, + format_reasoning_preview, + format_content_preview, +) + + +class TestFormatArgsPreview: + """Tests for format_args_preview function.""" + + def test_empty_arguments(self): + """Test with empty dict.""" + result = format_args_preview({}) + assert result == "" + + def test_single_string_argument(self): + """Test with single string argument.""" + result = format_args_preview({"name": "test"}) + assert result == "name=test" + + def test_multiple_arguments_within_limit(self): + """Test with 2 arguments (default limit).""" + result = format_args_preview({"host": "localhost", "port": "8080"}) + assert "host=localhost" in result + assert "port=8080" in result + + def test_more_than_max_args(self): + """Test that only first N args shown with indicator.""" + args = {"a": "1", "b": "2", "c": "3", "d": "4"} + result = format_args_preview(args, max_args=2) + + # Should have first 2 args + assert "a=1" in result + assert "b=2" in result + + # Should have indicator for more + assert "+2 more" in result + + # Should NOT show c or d + assert "c=3" not in result + + def test_long_string_value_truncated(self): + """Test that long string values are truncated.""" + long_str = "x" * 100 + result = format_args_preview({"data": long_str}, max_len=40) + + assert "data=" in result + assert "..." 
in result + assert len(result) < 100 + + def test_dict_value(self): + """Test formatting of dict value.""" + result = format_args_preview({"config": {"key": "value"}}) + assert "config=" in result + assert "key" in result + assert "value" in result + + def test_list_value(self): + """Test formatting of list value.""" + result = format_args_preview({"items": [1, 2, 3]}) + assert "items=" in result + assert "[1, 2, 3]" in result or "[1,2,3]" in result + + def test_large_dict_value_truncated(self): + """Test that large dict values are truncated.""" + large_dict = {f"key_{i}": f"value_{i}" for i in range(20)} + result = format_args_preview({"data": large_dict}, max_len=40) + + assert "data=" in result + assert "..." in result + + def test_large_list_value_truncated(self): + """Test that large list values are truncated.""" + large_list = list(range(50)) + result = format_args_preview({"numbers": large_list}, max_len=40) + + assert "numbers=" in result + assert "..." in result + + def test_integer_value(self): + """Test formatting of integer value.""" + result = format_args_preview({"count": 42}) + assert "count=42" in result + + def test_boolean_value(self): + """Test formatting of boolean value.""" + result = format_args_preview({"enabled": True}) + assert "enabled=True" in result + + def test_none_value(self): + """Test formatting of None value.""" + result = format_args_preview({"optional": None}) + assert "optional=None" in result + + def test_custom_max_args(self): + """Test with custom max_args parameter.""" + args = {"a": "1", "b": "2", "c": "3"} + result = format_args_preview(args, max_args=3) + + # All 3 should be shown + assert "a=1" in result + assert "b=2" in result + assert "c=3" in result + assert "more" not in result + + def test_custom_max_len(self): + """Test with custom max_len parameter.""" + result = format_args_preview({"data": "x" * 100}, max_len=10) + + # Should be truncated to ~10 chars + assert "..." 
in result + assert len(result) < 50 + + +class TestFormatReasoningPreview: + """Tests for format_reasoning_preview function.""" + + def test_empty_reasoning(self): + """Test with empty string.""" + result = format_reasoning_preview("") + assert result == "" + + def test_short_reasoning(self): + """Test with reasoning shorter than max_len.""" + short = "This is short" + result = format_reasoning_preview(short) + assert result == short + + def test_long_reasoning_from_end(self): + """Test showing last N chars (default).""" + long_text = "The quick brown fox jumps over the lazy dog. This is the end part." + result = format_reasoning_preview(long_text, max_len=30, from_end=True) + + # Should start with ... + assert result.startswith("...") + + # Should contain end part + assert "end part" in result + + # Should NOT contain beginning + assert "quick" not in result + + def test_long_reasoning_from_start(self): + """Test showing first N chars.""" + long_text = "This is the start. More text here. And even more at the end." + result = format_reasoning_preview(long_text, max_len=30, from_end=False) + + # Should end with ... + assert result.endswith("...") + + # Should contain start + assert "start" in result + + # Should NOT contain end + assert "end" not in result + + def test_word_boundary_from_end(self): + """Test that it tries to break at word boundary from end.""" + text = "word1 word2 word3 word4 word5 word6" + result = format_reasoning_preview(text, max_len=20, from_end=True) + + # Should have ellipsis + assert "..." 
in result + + # Should not break in middle of word (should find space) + # Result should not have partial words at start + words_in_result = result.replace("...", "").strip().split() + assert all(word in text for word in words_in_result) + + def test_word_boundary_from_start(self): + """Test that it tries to break at word boundary from start.""" + text = "word1 word2 word3 word4 word5 word6" + result = format_reasoning_preview(text, max_len=20, from_end=False) + + # Should have ellipsis + assert "..." in result + + # Should not break in middle of word + words_in_result = result.replace("...", "").strip().split() + assert all(word in text for word in words_in_result) + + def test_no_spaces_from_end(self): + """Test text with no spaces when showing from end.""" + text = "x" * 100 + result = format_reasoning_preview(text, max_len=30, from_end=True) + + # Should have ellipsis + assert result.startswith("...") + + # Should have ~30 chars (plus ellipsis) + assert len(result) <= 35 + + def test_no_spaces_from_start(self): + """Test text with no spaces when showing from start.""" + text = "x" * 100 + result = format_reasoning_preview(text, max_len=30, from_end=False) + + # Should have ellipsis + assert result.endswith("...") + + # Should have ~30 chars (plus ellipsis) + assert len(result) <= 35 + + def test_exact_length(self): + """Test with text exactly at max_len.""" + text = "x" * 50 + result = format_reasoning_preview(text, max_len=50) + + # Should return as-is + assert result == text + + def test_custom_max_len(self): + """Test with custom max_len.""" + text = "x" * 100 + result = format_reasoning_preview(text, max_len=10) + + assert "..." 
in result + assert len(result) <= 15 + + +class TestFormatContentPreview: + """Tests for format_content_preview function.""" + + def test_empty_content(self): + """Test with empty string.""" + result = format_content_preview("") + assert result == "" + + def test_short_content(self): + """Test with content shorter than max_len.""" + short = "Short text" + result = format_content_preview(short) + assert result == short + + def test_long_content(self): + """Test with content longer than max_len.""" + long_text = "x" * 200 + result = format_content_preview(long_text, max_len=100) + + # Should have ellipsis + assert result.endswith("...") + + # Should be truncated + assert len(result) <= 110 + + def test_word_boundary_breaking(self): + """Test that it breaks at word boundaries when possible.""" + text = "The quick brown fox jumps over the lazy dog. More text here." + result = format_content_preview(text, max_len=30) + + # Should have ellipsis + assert result.endswith("...") + + # Should not have partial words at end (should break at space) + words = result.replace("...", "").strip().split() + assert all(word in text for word in words) + + def test_word_boundary_too_early(self): + """Test that word boundary is only used if reasonably far.""" + # Create text where first space is very early + text = "a " + "x" * 200 + result = format_content_preview(text, max_len=100) + + # Should still truncate at max_len, not use early space + # (space is at position 1, which is < max_len/2 = 50) + assert len(result) > 50 + + def test_no_spaces(self): + """Test content with no spaces.""" + text = "x" * 200 + result = format_content_preview(text, max_len=100) + + # Should have ellipsis + assert result.endswith("...") + + # Should truncate at max_len + assert len(result) <= 105 + + def test_exact_length(self): + """Test with content exactly at max_len.""" + text = "x" * 100 + result = format_content_preview(text, max_len=100) + + # Should return as-is + assert result == text + + def 
test_custom_max_len(self): + """Test with custom max_len.""" + text = "x" * 200 + result = format_content_preview(text, max_len=50) + + assert result.endswith("...") + assert len(result) <= 55 + + def test_multiline_content(self): + """Test with multiline content.""" + text = "Line 1\nLine 2\nLine 3\n" + "More text " * 20 + result = format_content_preview(text, max_len=50) + + # Should truncate + assert result.endswith("...") + assert len(result) <= 55 diff --git a/tests/ui/test_formatting.py b/tests/display/test_formatting.py similarity index 55% rename from tests/ui/test_formatting.py rename to tests/display/test_formatting.py index adb57cdf..1f0e9999 100644 --- a/tests/ui/test_formatting.py +++ b/tests/display/test_formatting.py @@ -2,7 +2,7 @@ import pytest from rich.table import Table -from mcp_cli.ui.formatting import ( +from mcp_cli.display import ( format_tool_for_display, create_tools_table, create_servers_table, @@ -170,3 +170,128 @@ def test_display_tool_call_failure(result, capsys): if captured.out: expected_error = result.error or "Unknown error" assert expected_error in captured.out or "Error:" in captured.out + + +def test_display_tool_call_large_list_of_dicts(capsys): + """Test display of large list of dict results (>10 items).""" + large_list = [{"id": i, "value": f"item_{i}"} for i in range(15)] + result = ToolCallResult( + tool_name="test", success=True, result=large_list, error=None + ) + display_tool_call_result(result, console=None) + captured = capsys.readouterr() + output_text = captured.out + captured.err + + # Should show summary instead of table + assert "15 records" in output_text or "First 3 records" in output_text + + +def test_display_tool_call_simple_list(capsys): + """Test display of simple list (non-dict items).""" + simple_list = ["item1", "item2", "item3"] + result = ToolCallResult( + tool_name="test", success=True, result=simple_list, error=None + ) + display_tool_call_result(result, console=None) + captured = 
capsys.readouterr() + output_text = captured.out + captured.err + + # Should show items with bullet points + assert "3 items" in output_text or "item1" in output_text + + +def test_display_tool_call_large_simple_list(capsys): + """Test display of large simple list (>10 items).""" + large_list = [f"item_{i}" for i in range(15)] + result = ToolCallResult( + tool_name="test", success=True, result=large_list, error=None + ) + display_tool_call_result(result, console=None) + captured = capsys.readouterr() + output_text = captured.out + captured.err + + # Should show truncation message + assert "15 items" in output_text or "5 more" in output_text + + +def test_display_tool_call_large_dict(capsys): + """Test display of large dict (>10 keys).""" + large_dict = {f"key_{i}": f"value_{i}" for i in range(15)} + result = ToolCallResult( + tool_name="test", success=True, result=large_dict, error=None + ) + display_tool_call_result(result, console=None) + # Should use JSON format for large dicts + # Just verify it doesn't crash + assert True + + +def test_display_tool_call_very_large_dict(capsys): + """Test display of very large dict with >500 chars JSON.""" + # Create a dict that will be >500 chars when serialized + large_dict = {f"key_{i}": f"value_{i}" * 20 for i in range(20)} + result = ToolCallResult( + tool_name="test", success=True, result=large_dict, error=None + ) + display_tool_call_result(result, console=None) + captured = capsys.readouterr() + output_text = captured.out + captured.err + + # Should show truncation + assert "truncated" in output_text.lower() or len(output_text) > 0 + + +def test_display_tool_call_long_string(capsys): + """Test display of long string result (>500 chars).""" + long_string = "x" * 600 + result = ToolCallResult( + tool_name="test", success=True, result=long_string, error=None + ) + display_tool_call_result(result, console=None) + captured = capsys.readouterr() + output_text = captured.out + captured.err + + # Should show truncation message 
+ assert "truncated" in output_text.lower() or "x" * 500 in output_text + + +def test_display_tool_call_other_type_serializable(capsys): + """Test display of other types that can be JSON serialized.""" + result = ToolCallResult(tool_name="test", success=True, result=123, error=None) + display_tool_call_result(result, console=None) + # Should serialize as JSON + # Just verify it doesn't crash + assert True + + +def test_display_tool_call_other_type_large_json(capsys): + """Test display of other type with large JSON output.""" + # Create a large nested structure + large_obj = {"data": ["x" * 100 for _ in range(10)]} + result = ToolCallResult( + tool_name="test", success=True, result=large_obj, error=None + ) + display_tool_call_result(result, console=None) + captured = capsys.readouterr() + output_text = captured.out + captured.err + + # Should handle large JSON + assert len(output_text) > 0 or "truncated" in output_text.lower() + + +class NonSerializable: + """Class that can't be JSON serialized.""" + + pass + + +def test_display_tool_call_non_serializable(capsys): + """Test display of non-JSON-serializable result.""" + obj = NonSerializable() + result = ToolCallResult(tool_name="test", success=True, result=obj, error=None) + display_tool_call_result(result, console=None) + captured = capsys.readouterr() + output_text = captured.out + captured.err + + # Should fallback to str() representation + assert "NonSerializable" in output_text or len(output_text) > 0 diff --git a/tests/display/test_manager.py b/tests/display/test_manager.py new file mode 100644 index 00000000..27b72fe4 --- /dev/null +++ b/tests/display/test_manager.py @@ -0,0 +1,386 @@ +"""Tests for StreamingDisplayManager.""" + +import asyncio + +import pytest + +from mcp_cli.display.manager import ChukTermRenderer, StreamingDisplayManager +from mcp_cli.display.models import ContentType, StreamingPhase + + +class TestStreamingDisplayManager: + """Tests for StreamingDisplayManager.""" + + @pytest.fixture + 
def manager(self): + """Create a display manager instance.""" + return StreamingDisplayManager() + + @pytest.mark.asyncio + async def test_initial_state(self, manager): + """Test initial manager state.""" + assert manager.streaming_state is None + assert manager.tool_execution is None + assert not manager.is_streaming + assert not manager.is_tool_executing + assert not manager.is_busy + + @pytest.mark.asyncio + async def test_start_streaming(self, manager): + """Test starting streaming creates state.""" + await manager.start_streaming() + + assert manager.streaming_state is not None + assert manager.streaming_state.phase == StreamingPhase.INITIALIZING + assert manager.is_streaming + assert manager.is_busy + + # Cleanup + await manager.stop_streaming() + + @pytest.mark.asyncio + async def test_add_chunk_creates_state_if_needed(self, manager): + """Test adding chunk auto-creates state.""" + raw_chunk = {"content": "Hello"} + + await manager.add_chunk(raw_chunk) + + assert manager.streaming_state is not None + assert manager.streaming_state.accumulated_content == "Hello" + + # Cleanup + await manager.stop_streaming() + + @pytest.mark.asyncio + async def test_add_multiple_chunks(self, manager): + """Test adding multiple chunks accumulates content.""" + await manager.start_streaming() + + await manager.add_chunk({"content": "Hello "}) + await manager.add_chunk({"content": "world"}) + await manager.add_chunk({"content": "!"}) + + assert manager.streaming_state.accumulated_content == "Hello world!" 
+ assert manager.streaming_state.chunks_received == 3 + + # Cleanup + await manager.stop_streaming() + + @pytest.mark.asyncio + async def test_add_chunk_with_reasoning(self, manager): + """Test adding chunk with reasoning content.""" + await manager.start_streaming() + + await manager.add_chunk( + {"content": "Answer", "reasoning_content": "Let me think..."} + ) + + assert manager.streaming_state.accumulated_content == "Answer" + assert manager.streaming_state.reasoning_content == "Let me think..." + + # Cleanup + await manager.stop_streaming() + + @pytest.mark.asyncio + async def test_update_reasoning(self, manager): + """Test updating reasoning content.""" + await manager.start_streaming() + + await manager.update_reasoning("Thinking step 1...") + assert manager.streaming_state.reasoning_content == "Thinking step 1..." + + await manager.update_reasoning("Thinking step 2...") + assert manager.streaming_state.reasoning_content == "Thinking step 2..." + + # Cleanup + await manager.stop_streaming() + + @pytest.mark.asyncio + async def test_stop_streaming_returns_content(self, manager): + """Test stopping streaming returns accumulated content.""" + await manager.start_streaming() + await manager.add_chunk({"content": "Test content"}) + + content = await manager.stop_streaming() + + assert content == "Test content" + # streaming_state is cleared after stop_streaming to ensure clean state + assert manager.streaming_state is None + assert not manager.is_streaming + + @pytest.mark.asyncio + async def test_stop_streaming_interrupted(self, manager): + """Test stopping streaming with interrupt flag.""" + await manager.start_streaming() + await manager.add_chunk({"content": "Partial"}) + + content = await manager.stop_streaming(interrupted=True) + + assert content == "Partial" + # streaming_state is cleared after stop_streaming to ensure clean state + assert manager.streaming_state is None + assert not manager.is_streaming + + @pytest.mark.asyncio + async def 
test_stop_streaming_without_start(self, manager): + """Test stopping streaming when not started.""" + content = await manager.stop_streaming() + + assert content == "" + + @pytest.mark.asyncio + async def test_start_tool_execution(self, manager): + """Test starting tool execution.""" + await manager.start_tool_execution("test_tool", {"arg": "value"}) + + assert manager.tool_execution is not None + assert manager.tool_execution.name == "test_tool" + assert manager.tool_execution.arguments == {"arg": "value"} + assert manager.is_tool_executing + assert manager.is_busy + + # Cleanup + await manager.stop_tool_execution("result") + + @pytest.mark.asyncio + async def test_stop_tool_execution_success(self, manager): + """Test stopping tool execution successfully.""" + await manager.start_tool_execution("test_tool", {}) + + await manager.stop_tool_execution("Success result", success=True) + + assert not manager.is_tool_executing + assert manager.tool_execution is None + + @pytest.mark.asyncio + async def test_stop_tool_execution_failure(self, manager): + """Test stopping tool execution with failure.""" + await manager.start_tool_execution("test_tool", {}) + + await manager.stop_tool_execution("Error message", success=False) + + assert not manager.is_tool_executing + assert manager.tool_execution is None + + @pytest.mark.asyncio + async def test_stop_tool_without_start(self, manager): + """Test stopping tool when not started.""" + # Should not raise + await manager.stop_tool_execution("result") + + @pytest.mark.asyncio + async def test_concurrent_streaming_and_tool(self, manager): + """Test streaming stops before tool starts.""" + await manager.start_streaming() + await manager.add_chunk({"content": "Response"}) + + # Starting tool while streaming + await manager.start_tool_execution("tool", {}) + + # Should have tool state but streaming should be stopped + assert manager.is_tool_executing + # Streaming might still be active depending on implementation + + # Cleanup + await 
manager.stop_streaming() + await manager.stop_tool_execution("result") + + @pytest.mark.asyncio + async def test_restart_streaming(self, manager): + """Test stopping and restarting streaming.""" + # First stream + await manager.start_streaming() + await manager.add_chunk({"content": "First"}) + content1 = await manager.stop_streaming() + + assert content1 == "First" + + # Second stream + await manager.start_streaming() + await manager.add_chunk({"content": "Second"}) + content2 = await manager.stop_streaming() + + assert content2 == "Second" + # Should be fresh state, not accumulated + assert content2 != "FirstSecond" + + @pytest.mark.asyncio + async def test_stop_streaming_interrupts_previous(self, manager): + """Test starting new stream interrupts previous.""" + await manager.start_streaming() + await manager.add_chunk({"content": "First"}) + + # Start new stream without stopping first + await manager.start_streaming() + + # Previous stream should be interrupted + assert manager.streaming_state.chunks_received == 0 + + @pytest.mark.asyncio + async def test_show_user_message(self, manager): + """Test showing user message (no exceptions).""" + # Should not raise + manager.show_user_message("Hello") + + @pytest.mark.asyncio + async def test_show_system_message(self, manager): + """Test showing system message (no exceptions).""" + # Should not raise + manager.show_system_message("System info") + + @pytest.mark.asyncio + async def test_refresh_loop_stops_cleanly(self, manager): + """Test refresh loop stops without errors.""" + await manager.start_streaming() + + # Let refresh loop run a bit + await asyncio.sleep(0.3) + + # Stop streaming + await manager.stop_streaming() + + # Should have no active tasks + assert manager._refresh_task is None or manager._refresh_task.done() + + @pytest.mark.asyncio + async def test_refresh_loop_with_tool(self, manager): + """Test refresh loop runs during tool execution.""" + await manager.start_tool_execution("tool", {}) + + # Let 
refresh loop run a bit + await asyncio.sleep(0.3) + + await manager.stop_tool_execution("result") + + # Should have no active tasks + assert manager._refresh_task is None or manager._refresh_task.done() + + @pytest.mark.asyncio + async def test_content_type_detection_in_stream(self, manager): + """Test content type is detected during streaming.""" + await manager.start_streaming() + + await manager.add_chunk({"content": "def hello():"}) + + assert manager.streaming_state.detected_type == ContentType.CODE + + # Cleanup + await manager.stop_streaming() + + @pytest.mark.asyncio + async def test_elapsed_time_tracking(self, manager): + """Test elapsed time is tracked.""" + await manager.start_streaming() + + # Add small delay + await asyncio.sleep(0.1) + + assert manager.streaming_state.elapsed_time >= 0.1 + + # Cleanup + await manager.stop_streaming() + + +class TestChukTermRenderer: + """Tests for ChukTermRenderer.""" + + def test_render_returns_content(self): + """Test renderer returns content as-is.""" + renderer = ChukTermRenderer() + + result = renderer.render("Test content", ContentType.TEXT) + + assert result == "Test content" + + def test_render_with_different_types(self): + """Test rendering different content types.""" + renderer = ChukTermRenderer() + + for content_type in ContentType: + result = renderer.render("Content", content_type) + assert result == "Content" + + +class TestDisplayManagerIntegration: + """Integration tests for display manager.""" + + @pytest.mark.asyncio + async def test_full_streaming_lifecycle(self): + """Test complete streaming lifecycle.""" + manager = StreamingDisplayManager() + + # Start streaming + await manager.start_streaming() + assert manager.is_streaming + + # Add chunks with various content + await manager.add_chunk({"content": "def hello():\n"}) + await manager.add_chunk({"content": " return 'world'\n"}) + await manager.add_chunk({"content": "", "finish_reason": "stop"}) + + # Update reasoning + await 
manager.update_reasoning("Generated a function") + + # Check state before stopping + assert manager.streaming_state.detected_type == ContentType.CODE + assert manager.streaming_state.chunks_received == 3 + assert manager.streaming_state.finish_reason == "stop" + + # Stop streaming + final = await manager.stop_streaming() + + assert final == "def hello():\n return 'world'\n" + # streaming_state is cleared after stop_streaming to ensure clean state + assert manager.streaming_state is None + assert not manager.is_streaming + + @pytest.mark.asyncio + async def test_full_tool_lifecycle(self): + """Test complete tool execution lifecycle.""" + manager = StreamingDisplayManager() + + # Start tool + await manager.start_tool_execution( + "database_query", {"query": "SELECT * FROM users"} + ) + assert manager.is_tool_executing + + # Simulate execution delay + await asyncio.sleep(0.1) + + # Stop with success + await manager.stop_tool_execution("42 rows returned", success=True) + + assert not manager.is_tool_executing + + @pytest.mark.asyncio + async def test_stream_then_tool_sequence(self): + """Test streaming followed by tool execution.""" + manager = StreamingDisplayManager() + + # Stream response + await manager.start_streaming() + await manager.add_chunk({"content": "I'll query the database"}) + await manager.stop_streaming() + + # Execute tool + await manager.start_tool_execution("query_db", {"table": "users"}) + await manager.stop_tool_execution("Results: ...") + + assert not manager.is_busy + + @pytest.mark.asyncio + async def test_interrupted_stream_handling(self): + """Test handling interrupted stream.""" + manager = StreamingDisplayManager() + + await manager.start_streaming() + await manager.add_chunk({"content": "Starting long response..."}) + + # User interrupts + content = await manager.stop_streaming(interrupted=True) + + assert content == "Starting long response..." 
+ # streaming_state is cleared after stop_streaming to ensure clean state + assert manager.streaming_state is None + assert not manager.is_streaming diff --git a/tests/display/test_manager_coverage.py b/tests/display/test_manager_coverage.py new file mode 100644 index 00000000..52b8138f --- /dev/null +++ b/tests/display/test_manager_coverage.py @@ -0,0 +1,523 @@ +"""Additional tests for StreamingDisplayManager to increase coverage.""" + +import asyncio +import time + +import pytest + +from mcp_cli.display.manager import StreamingDisplayManager +from mcp_cli.display.models import StreamingState + + +class TestStreamingDisplayManagerCoverage: + """Additional tests for uncovered code paths in StreamingDisplayManager.""" + + @pytest.fixture + def manager(self): + """Create a display manager instance.""" + return StreamingDisplayManager() + + # ==================== STREAMING EDGE CASES ==================== + + @pytest.mark.asyncio + async def test_add_chunk_returns_early_when_no_state_created(self, manager): + """Test add_chunk handles edge case where start_streaming fails.""" + # This tests line 130 - the type guard after start_streaming + # Directly set streaming_state to None to simulate failure + manager.streaming_state = None + + # Mock start_streaming to not create state + original_start = manager.start_streaming + + async def mock_start(): + pass # Don't create state + + manager.start_streaming = mock_start + + # Should not raise, just return early + await manager.add_chunk({"content": "test"}) + + # Restore + manager.start_streaming = original_start + + @pytest.mark.asyncio + async def test_update_reasoning_without_state(self, manager): + """Test update_reasoning returns early when no streaming state.""" + # Line 148 - early return when no streaming_state + manager.streaming_state = None + + # Should not raise + await manager.update_reasoning("some reasoning") + + # ==================== TOOL EXECUTION TIMEOUT HANDLING ==================== + + 
@pytest.mark.asyncio + async def test_start_tool_execution_with_stuck_refresh_task(self, manager): + """Test start_tool_execution handles stuck refresh task.""" + # Lines 243-248 - timeout handling for stuck refresh task + await manager.start_streaming() + + # Create a mock task that takes a long time + async def slow_task(): + await asyncio.sleep(10) + + manager._refresh_task = asyncio.create_task(slow_task()) + manager._refresh_active = True + + # Starting tool should timeout and cancel the stuck task + await manager.start_tool_execution("test", {}) + + # Should have new tool state + assert manager.tool_execution is not None + assert manager.tool_execution.name == "test" + + # Cleanup + await manager.stop_tool_execution("done") + + @pytest.mark.asyncio + async def test_start_tool_clears_stale_streaming_state(self, manager): + """Test start_tool_execution clears inactive streaming state.""" + # Lines 267-273 and earlier - clearing stale streaming state + await manager.start_streaming() + await manager.add_chunk({"content": "test"}) + + # Complete the streaming (marks as inactive via complete()) + manager.streaming_state.complete() + + # Start tool - should clear stale streaming state + await manager.start_tool_execution("tool", {"arg": "val"}) + + assert manager.streaming_state is None + assert manager.tool_execution is not None + + # Cleanup + await manager.stop_tool_execution("done") + + # ==================== REFRESH LOOP HANDLING ==================== + + @pytest.mark.asyncio + async def test_stop_refresh_loop_timeout(self, manager): + """Test stop_refresh_loop handles timeout cancellation.""" + # Lines 355-360 - timeout and cancellation + + async def stuck_loop(): + while True: + await asyncio.sleep(0.01) + + manager._refresh_task = asyncio.create_task(stuck_loop()) + manager._refresh_active = True + + # Stop should timeout and cancel + await manager._stop_refresh_loop() + + assert manager._refresh_task is None + + @pytest.mark.asyncio + async def 
test_refresh_loop_error_handling(self, manager): + """Test refresh loop handles errors gracefully.""" + # Lines 389-390 - error logging in refresh loop + await manager.start_streaming() + + # Corrupt the state to cause an error + manager.streaming_state = "not a StreamingState" + + # Let loop run and hit error + await asyncio.sleep(0.2) + + # Stop - should not raise + manager._refresh_active = False + if manager._refresh_task: + manager._refresh_task.cancel() + try: + await manager._refresh_task + except asyncio.CancelledError: + pass + + @pytest.mark.asyncio + async def test_start_refresh_loop_when_already_running(self, manager): + """Test start_refresh_loop returns early when already running.""" + # Line 340 - early return when task exists + await manager.start_streaming() + + task1 = manager._refresh_task + + # Try to start again + await manager._start_refresh_loop() + + # Should be same task + assert manager._refresh_task is task1 + + # Cleanup + await manager.stop_streaming() + + # ==================== RENDERING STATUS MODES ==================== + + @pytest.mark.asyncio + async def test_render_streaming_status_with_content(self, manager): + """Test rendering when content exists (no reasoning).""" + # Lines 423-429 - content mode + await manager.start_streaming() + await manager.add_chunk({"content": "Hello world"}) + + # Let refresh loop render + await asyncio.sleep(0.2) + + # Should be in content mode + assert manager.streaming_state.content_length > 0 + + await manager.stop_streaming() + + @pytest.mark.asyncio + async def test_render_streaming_status_with_reasoning(self, manager): + """Test rendering with reasoning preview (no content).""" + # Lines 430-435 - thinking mode + await manager.start_streaming() + + # Set reasoning but no content + manager.streaming_state.reasoning_content = "Let me think about this..." 
+ manager.streaming_state.accumulated_content = "" + + # Force reasoning preview update + manager._last_reasoning_update = 0 + manager._update_reasoning_preview() + + # Let refresh loop render + await asyncio.sleep(0.2) + + await manager.stop_streaming() + + @pytest.mark.asyncio + async def test_render_streaming_mode_switch(self, manager): + """Test switching from thinking mode to content mode.""" + # Lines 444-449 - mode switching detection + await manager.start_streaming() + + # Start with reasoning only + manager.streaming_state.reasoning_content = "Thinking..." + manager._showing_thinking = True + manager._last_reasoning_update = 0 + manager._update_reasoning_preview() + + await asyncio.sleep(0.15) + + # Then add content - triggers mode switch + await manager.add_chunk({"content": "Response"}) + + await asyncio.sleep(0.15) + + await manager.stop_streaming() + + # ==================== MULTI-LINE DISPLAY HANDLING ==================== + + @pytest.mark.asyncio + async def test_multiline_display_clear_and_rewrite(self, manager): + """Test clearing and rewriting multi-line display.""" + # Lines 467-481, 495-501 - multi-line handling + await manager.start_streaming() + + # Set up multi-line reasoning preview + manager.streaming_state.reasoning_content = "x" * 300 + manager._last_line_count = 3 + manager._showing_thinking = True + manager._last_reasoning_update = 0 + manager._update_reasoning_preview() + + # Force render + await manager._render_streaming_status() + + await manager.stop_streaming() + + @pytest.mark.asyncio + async def test_single_line_display_update(self, manager): + """Test single line status update.""" + # Lines 483-492 - single line clear and rewrite + await manager.start_streaming() + await manager.add_chunk({"content": "short"}) + + manager._last_line_count = 1 + + # Force render + await manager._render_streaming_status() + + await manager.stop_streaming() + + # ==================== CLEAR PREVIOUS LINES ==================== + + def 
test_clear_previous_lines_single_line(self, manager): + """Test clearing a single line.""" + # Lines 508-530 - clearing with single line + manager._last_line_count = 1 + + # Should not raise + manager._clear_previous_lines() + + def test_clear_previous_lines_multiple_lines(self, manager, capsys): + """Test clearing multiple lines.""" + # Lines 508-530 - clearing multiple lines + manager._last_line_count = 3 + + manager._clear_previous_lines() + + # Check ANSI codes were used + captured = capsys.readouterr() + # Should contain clear sequences + assert "\033[K" in captured.out or captured.out == "" + + def test_clear_previous_lines_zero_lines(self, manager): + """Test clearing when no lines to clear.""" + # Line 508-509 - early return + manager._last_line_count = 0 + + # Should return early, not raise + manager._clear_previous_lines() + + # ==================== CLEAR CURRENT DISPLAY ==================== + + @pytest.mark.asyncio + async def test_clear_current_display_async_multiline(self, manager): + """Test async clearing of multi-line display.""" + # Lines 540-556 - async version + manager._last_line_count = 3 + manager._last_status = "some status" + + await manager._clear_current_display_async() + + assert manager._last_line_count == 0 + assert manager._last_status == "" + + @pytest.mark.asyncio + async def test_clear_current_display_async_zero_lines(self, manager): + """Test async clearing when no lines.""" + manager._last_line_count = 0 + + await manager._clear_current_display_async() + + assert manager._last_line_count == 0 + + def test_clear_current_display_sync_multiline(self, manager): + """Test sync clearing of multi-line display.""" + # Lines 570-575 - sync version with multiple lines + manager._last_line_count = 3 + manager._last_status = "status" + + manager._clear_current_display() + + assert manager._last_line_count == 0 + assert manager._last_status == "" + + def test_clear_current_display_sync_zero_lines(self, manager): + """Test sync clearing when no 
lines to clear.""" + manager._last_line_count = 0 + + manager._clear_current_display() + + assert manager._last_line_count == 0 + + # ==================== SPLIT PREVIEW INTO LINES ==================== + + def test_split_preview_short_text(self, manager): + """Test splitting short text into lines.""" + # Lines 614-645 + result = manager._split_preview_into_lines("short text", max_line_len=80) + + assert len(result) == 1 + assert result[0] == "short text" + + def test_split_preview_with_ellipsis(self, manager): + """Test splitting text that starts with ellipsis.""" + result = manager._split_preview_into_lines( + "...continued from before", max_line_len=80 + ) + + assert len(result) >= 1 + assert result[0].startswith("...") + + def test_split_preview_long_text(self, manager): + """Test splitting long text into multiple lines.""" + long_text = " ".join(["word"] * 50) # ~250 chars + + result = manager._split_preview_into_lines( + long_text, max_line_len=80, num_lines=3 + ) + + assert len(result) <= 3 + for line in result: + assert len(line) <= 85 # Allow small overflow for word boundaries + + def test_split_preview_exact_lines(self, manager): + """Test text splits into exact number of lines.""" + # Create text that should split into exactly 3 lines + text = " ".join(["word"] * 30) + + result = manager._split_preview_into_lines(text, max_line_len=40, num_lines=3) + + assert len(result) <= 3 + + def test_split_preview_empty_text(self, manager): + """Test splitting empty text.""" + result = manager._split_preview_into_lines("", max_line_len=80) + + assert result == [] + + def test_split_preview_single_long_word(self, manager): + """Test handling single word longer than max_line_len.""" + long_word = "x" * 100 + + result = manager._split_preview_into_lines(long_word, max_line_len=80) + + # Should still return the word (won't be broken mid-word) + assert len(result) >= 1 + + # ==================== UPDATE REASONING PREVIEW ==================== + + def 
test_update_reasoning_preview_no_state(self, manager): + """Test update with no streaming state.""" + # Lines 656-658 + manager.streaming_state = None + + manager._update_reasoning_preview() + + assert manager._last_reasoning_preview == "" + + def test_update_reasoning_preview_no_reasoning(self, manager): + """Test update with empty reasoning content.""" + manager.streaming_state = StreamingState() + manager.streaming_state.reasoning_content = "" + + manager._update_reasoning_preview() + + assert manager._last_reasoning_preview == "" + + def test_update_reasoning_preview_time_debounce(self, manager): + """Test time-based debouncing.""" + # Lines 670-672 + manager.streaming_state = StreamingState() + manager.streaming_state.reasoning_content = "Test reasoning" + manager._last_reasoning_update = 0 # Force update + + manager._update_reasoning_preview() + + assert manager._last_reasoning_preview != "" + assert "💭 Thinking" in manager._last_reasoning_preview + + def test_update_reasoning_preview_chunk_debounce(self, manager): + """Test chunk-based debouncing.""" + # Lines 675-677 + manager.streaming_state = StreamingState() + manager.streaming_state.reasoning_content = "Test reasoning" + manager.streaming_state.chunks_received = 50 + manager._last_reasoning_chunk_count = 0 # Force chunk-based update + manager._last_reasoning_update = time.time() # Recent time update + + manager._update_reasoning_preview() + + # Should have updated due to chunk threshold + assert manager._last_reasoning_preview != "" + + def test_update_reasoning_preview_no_update_when_debounced(self, manager): + """Test preview not updated when debounced.""" + manager.streaming_state = StreamingState() + manager.streaming_state.reasoning_content = "Test reasoning" + manager._last_reasoning_update = time.time() + manager._last_reasoning_chunk_count = manager.streaming_state.chunks_received + manager._last_reasoning_preview = "old preview" + + manager._update_reasoning_preview() + + # Should keep old 
preview + assert manager._last_reasoning_preview == "old preview" + + def test_update_reasoning_preview_long_content(self, manager): + """Test reasoning preview with very long content (>1000 chars).""" + # Lines 688-691 - formatting length for 1k+ content + manager.streaming_state = StreamingState() + manager.streaming_state.reasoning_content = "x" * 1500 + manager._last_reasoning_update = 0 + + manager._update_reasoning_preview() + + # Should show formatted length with 'k' + assert "1.5k" in manager._last_reasoning_preview + + def test_update_reasoning_preview_short_content(self, manager): + """Test reasoning preview with short content (<1000 chars).""" + manager.streaming_state = StreamingState() + manager.streaming_state.reasoning_content = "x" * 500 + manager._last_reasoning_update = 0 + + manager._update_reasoning_preview() + + # Should show raw number + assert "500" in manager._last_reasoning_preview + + # ==================== RENDER TOOL STATUS ==================== + + @pytest.mark.asyncio + async def test_render_tool_status_without_tool(self, manager): + """Test render_tool_status returns early with no tool.""" + # Line 712 + manager.tool_execution = None + + await manager._render_tool_status() + + # Should not raise + + @pytest.mark.asyncio + async def test_render_tool_status_updates_live_status(self, manager): + """Test render_tool_status updates live status display.""" + await manager.start_tool_execution("test_tool", {"arg": "value"}) + + # Let it render + await asyncio.sleep(0.15) + + # Should have updated last_status + assert manager._last_status != "" + assert manager._last_line_count == 1 + + await manager.stop_tool_execution("done") + + # ==================== DO CLEAR DISPLAY ==================== + + def test_do_clear_display_with_lines(self, manager): + """Test _do_clear_display with lines to clear.""" + # Lines 265-277 + manager._last_line_count = 2 + manager._last_status = "some status" + + manager._do_clear_display() + + assert 
manager._last_line_count == 0 + assert manager._last_status == "" + + def test_do_clear_display_single_line(self, manager): + """Test _do_clear_display with single line.""" + manager._last_line_count = 1 + manager._last_status = "status" + + manager._do_clear_display() + + assert manager._last_line_count == 0 + + def test_do_clear_display_no_lines(self, manager): + """Test _do_clear_display with no lines.""" + manager._last_line_count = 0 + + manager._do_clear_display() + + assert manager._last_line_count == 0 + + # ==================== FINISH DISPLAY ==================== + + def test_finish_display(self, manager, capsys): + """Test _finish_display clears and adds newline.""" + manager._last_line_count = 1 + manager._last_status = "status" + + manager._finish_display() + + assert manager._last_line_count == 0 + assert manager._last_status == "" + + # Should have printed newline + captured = capsys.readouterr() + assert "\n" in captured.out diff --git a/tests/display/test_models.py b/tests/display/test_models.py new file mode 100644 index 00000000..f1afe479 --- /dev/null +++ b/tests/display/test_models.py @@ -0,0 +1,364 @@ +"""Tests for streaming display Pydantic models.""" + +import pytest +import time +from mcp_cli.display.models import ( + ContentType, + StreamingChunk, + StreamingPhase, + StreamingState, + DisplayUpdate, +) + + +class TestStreamingChunk: + """Tests for StreamingChunk model.""" + + def test_from_raw_chunk_chuk_llm_format(self): + """Test parsing chuk-llm format chunks.""" + raw = { + "response": "Hello world", + "tool_calls": [{"id": "1", "name": "test"}], + "reasoning_content": "Thinking...", + } + + chunk = StreamingChunk.from_raw_chunk(raw) + + assert chunk.content == "Hello world" + assert chunk.tool_calls == [{"id": "1", "name": "test"}] + assert chunk.reasoning_content == "Thinking..." 
+ assert chunk.finish_reason is None + + def test_from_raw_chunk_openai_format(self): + """Test parsing OpenAI format chunks.""" + raw = { + "choices": [ + { + "delta": {"content": "Hello"}, + "finish_reason": "stop", + } + ] + } + + chunk = StreamingChunk.from_raw_chunk(raw) + + assert chunk.content == "Hello" + assert chunk.finish_reason == "stop" + + def test_from_raw_chunk_deepseek_reasoning(self): + """Test parsing DeepSeek reasoning content from delta.""" + raw = { + "choices": [ + { + "delta": {"reasoning_content": "Let me analyze this..."}, + "finish_reason": None, + } + ] + } + + chunk = StreamingChunk.from_raw_chunk(raw) + + assert chunk.reasoning_content == "Let me analyze this..." + assert chunk.content is None + + def test_from_raw_chunk_deepseek_mixed(self): + """Test parsing DeepSeek chunk with both reasoning and content.""" + raw = { + "choices": [ + { + "delta": { + "reasoning_content": "Thinking...", + "content": "Answer", + }, + } + ] + } + + chunk = StreamingChunk.from_raw_chunk(raw) + + assert chunk.reasoning_content == "Thinking..." 
+ assert chunk.content == "Answer" + + def test_from_raw_chunk_simple_format(self): + """Test parsing simple content format.""" + raw = {"content": "Test content", "finish_reason": "length"} + + chunk = StreamingChunk.from_raw_chunk(raw) + + assert chunk.content == "Test content" + assert chunk.finish_reason == "length" + + def test_from_raw_chunk_empty(self): + """Test parsing empty chunk.""" + chunk = StreamingChunk.from_raw_chunk({}) + + assert chunk.content is None + assert chunk.tool_calls is None + assert chunk.reasoning_content is None + + def test_chunk_is_immutable(self): + """Test that chunks are frozen.""" + chunk = StreamingChunk(content="test") + + with pytest.raises(Exception): # Pydantic ValidationError + chunk.content = "modified" + + +class TestStreamingState: + """Tests for StreamingState model.""" + + def test_initial_state(self): + """Test initial state creation.""" + state = StreamingState() + + assert state.accumulated_content == "" + assert state.chunks_received == 0 + assert state.phase == StreamingPhase.INITIALIZING + assert state.detected_type == ContentType.UNKNOWN + assert not state.interrupted + assert state.is_active + assert not state.is_complete + + def test_add_text_chunk(self): + """Test adding text chunk.""" + state = StreamingState() + chunk = StreamingChunk(content="Hello ") + + state.add_chunk(chunk) + + assert state.accumulated_content == "Hello " + assert state.chunks_received == 1 + assert state.phase == StreamingPhase.RECEIVING + + def test_add_multiple_chunks(self): + """Test adding multiple chunks.""" + state = StreamingState() + + state.add_chunk(StreamingChunk(content="Hello ")) + state.add_chunk(StreamingChunk(content="world")) + state.add_chunk(StreamingChunk(content="!")) + + assert state.accumulated_content == "Hello world!" 
+ assert state.chunks_received == 3 + + def test_detect_code_content_type(self): + """Test code content type detection.""" + state = StreamingState() + + state.add_chunk(StreamingChunk(content="def hello():")) + + assert state.detected_type == ContentType.CODE + + def test_detect_markdown_content_type(self): + """Test markdown content type detection.""" + state = StreamingState() + + state.add_chunk(StreamingChunk(content="## Heading")) + + assert state.detected_type == ContentType.MARKDOWN + + def test_detect_markdown_table_content_type(self): + """Test markdown table detection.""" + state = StreamingState() + + table = "| Col 1 | Col 2 |\n|-------|-------|" + state.add_chunk(StreamingChunk(content=table)) + + assert state.detected_type == ContentType.MARKDOWN_TABLE + + def test_detect_json_content_type(self): + """Test JSON content type detection.""" + state = StreamingState() + + state.add_chunk(StreamingChunk(content='{"key": "value"}')) + + assert state.detected_type == ContentType.JSON + + def test_detect_sql_content_type(self): + """Test SQL content type detection.""" + state = StreamingState() + + state.add_chunk(StreamingChunk(content="SELECT * FROM users")) + + assert state.detected_type == ContentType.SQL + + def test_content_type_locks_after_detection(self): + """Test that content type doesn't change after detection.""" + state = StreamingState() + + state.add_chunk(StreamingChunk(content="def hello():")) + assert state.detected_type == ContentType.CODE + + # Add markdown content - type should stay CODE + state.add_chunk(StreamingChunk(content="## Heading")) + assert state.detected_type == ContentType.CODE + + def test_reasoning_content(self): + """Test reasoning content accumulation.""" + state = StreamingState() + + chunk = StreamingChunk(content="Answer", reasoning_content="Let me think...") + state.add_chunk(chunk) + + assert state.reasoning_content == "Let me think..." 
+ + def test_finish_reason(self): + """Test finish reason capture.""" + state = StreamingState() + + chunk = StreamingChunk(content="Done", finish_reason="stop") + state.add_chunk(chunk) + + assert state.finish_reason == "stop" + + def test_complete_normal(self): + """Test normal completion.""" + state = StreamingState() + state.add_chunk(StreamingChunk(content="Test")) + + state.complete() + + assert state.phase == StreamingPhase.COMPLETED + assert not state.interrupted + assert state.is_complete + assert not state.is_active + assert state.end_time is not None + + def test_complete_interrupted(self): + """Test interrupted completion.""" + state = StreamingState() + state.add_chunk(StreamingChunk(content="Test")) + + state.complete(interrupted=True) + + assert state.phase == StreamingPhase.INTERRUPTED + assert state.interrupted + assert state.is_complete + + def test_mark_error(self): + """Test error state.""" + state = StreamingState() + + state.mark_error() + + assert state.phase == StreamingPhase.ERROR + assert state.is_complete + assert state.end_time is not None + + def test_elapsed_time(self): + """Test elapsed time calculation.""" + state = StreamingState() + + # Add small delay + time.sleep(0.1) + + assert state.elapsed_time >= 0.1 + + def test_elapsed_time_after_completion(self): + """Test elapsed time uses end_time after completion.""" + state = StreamingState() + time.sleep(0.1) + state.complete() + + # Elapsed time should be frozen + elapsed1 = state.elapsed_time + time.sleep(0.1) + elapsed2 = state.elapsed_time + + # Should be nearly identical (end_time is set) + assert abs(elapsed1 - elapsed2) < 0.01 + + def test_content_length(self): + """Test content length property.""" + state = StreamingState() + + state.add_chunk(StreamingChunk(content="Hello")) + assert state.content_length == 5 + + state.add_chunk(StreamingChunk(content=" world")) + assert state.content_length == 11 + + +class TestDisplayUpdate: + """Tests for DisplayUpdate model.""" + + def 
test_from_state(self): + """Test creating update from state.""" + state = StreamingState() + state.add_chunk(StreamingChunk(content="def hello():")) + + update = DisplayUpdate.from_state(state) + + assert update.content == "def hello():" + assert update.content_type == ContentType.CODE + assert update.phase == StreamingPhase.RECEIVING + assert update.chunks_received == 1 + assert update.show_spinner is True + + def test_from_completed_state(self): + """Test update from completed state.""" + state = StreamingState() + state.add_chunk(StreamingChunk(content="Done")) + state.complete() + + update = DisplayUpdate.from_state(state) + + assert update.phase == StreamingPhase.COMPLETED + assert update.show_spinner is False + + def test_update_with_reasoning(self): + """Test update includes reasoning content.""" + state = StreamingState() + chunk = StreamingChunk(content="Answer", reasoning_content="Thinking...") + state.add_chunk(chunk) + + update = DisplayUpdate.from_state(state) + + assert update.reasoning_content == "Thinking..." 
+ + def test_update_is_immutable(self): + """Test that updates are frozen.""" + state = StreamingState() + update = DisplayUpdate.from_state(state) + + with pytest.raises(Exception): # Pydantic ValidationError + update.content = "modified" + + +class TestContentTypeDetection: + """Tests for content type detection edge cases.""" + + def test_code_with_backticks(self): + """Test code blocks with backticks.""" + state = StreamingState() + state.add_chunk(StreamingChunk(content="Here's code:\n```python\n")) + + assert state.detected_type == ContentType.CODE + + def test_javascript_function(self): + """Test JavaScript function detection.""" + state = StreamingState() + state.add_chunk(StreamingChunk(content="function test() {")) + + assert state.detected_type == ContentType.CODE + + def test_html_markup(self): + """Test HTML detection.""" + state = StreamingState() + state.add_chunk(StreamingChunk(content="")) + + assert state.detected_type == ContentType.MARKUP + + def test_json_array(self): + """Test JSON array detection.""" + state = StreamingState() + state.add_chunk(StreamingChunk(content="[1, 2, 3]")) + + assert state.detected_type == ContentType.JSON + + def test_plain_text(self): + """Test plain text detection.""" + state = StreamingState() + state.add_chunk(StreamingChunk(content="Just some regular text.")) + + assert state.detected_type == ContentType.TEXT diff --git a/tests/display/test_renderers.py b/tests/display/test_renderers.py new file mode 100644 index 00000000..3f718fca --- /dev/null +++ b/tests/display/test_renderers.py @@ -0,0 +1,291 @@ +"""Tests for display renderers module.""" + +from mcp_cli.display.renderers import ( + render_streaming_status, + render_tool_execution_status, + show_final_streaming_response, + show_tool_execution_result, +) +from mcp_cli.display.models import StreamingState +from mcp_cli.chat.models import ToolExecutionState +import time + + +class TestRenderStreamingStatus: + """Tests for render_streaming_status function.""" + + 
def test_basic_streaming_status(self): + """Test basic streaming status rendering.""" + state = StreamingState() + state.chunks_received = 10 + state.accumulated_content = "Hello world" + + result = render_streaming_status(state, "⠙") + + assert "⠙" in result + assert "Streaming" in result + assert "(10 chunks)" in result + assert "11 chars" in result + assert "s" in result # time + + def test_streaming_with_reasoning(self): + """Test streaming status with cached reasoning preview.""" + state = StreamingState() + state.chunks_received = 5 + state.accumulated_content = "Answer" + state.reasoning_content = "Let me think about this problem step by step..." + + # Create formatted preview (as the manager would - multi-line format) + reasoning_preview = ( + f"💭 Thinking ({len(state.reasoning_content)} chars):\n ...step by step" + ) + + result = render_streaming_status( + state, "⠹", reasoning_preview=reasoning_preview + ) + + assert "⠹" in result + assert "Streaming" in result + assert "💭 Thinking" in result + assert str(len(state.reasoning_content)) in result + assert "...step by step" in result + + def test_streaming_without_reasoning(self): + """Test streaming status without reasoning.""" + state = StreamingState() + state.chunks_received = 3 + state.accumulated_content = "Test" + + result = render_streaming_status(state, "⠸") + + assert "⠸" in result + assert "Streaming" in result + assert "💭" not in result # No thinking indicator + + def test_streaming_long_reasoning(self): + """Test streaming with very long reasoning content.""" + state = StreamingState() + state.reasoning_content = "x" * 200 + + # Create formatted preview with truncation (multi-line format) + reasoning_preview = f"💭 Thinking (200 chars):\n ...{'x' * 27}" + + result = render_streaming_status( + state, "⠼", reasoning_preview=reasoning_preview + ) + + # Should have preview (truncated) + assert "200 chars" in result + assert "💭 Thinking" in result + assert "x" * 27 in result + + +class 
TestRenderToolExecutionStatus: + """Tests for render_tool_execution_status function.""" + + def test_basic_tool_status(self): + """Test basic tool execution status.""" + tool = ToolExecutionState( + name="test_tool", + arguments={}, + start_time=time.time(), + ) + + result = render_tool_execution_status(tool, "⠙", elapsed=1.5) + + assert "⠙" in result + assert "Executing tool: test_tool" in result + assert "(1.5s)" in result + + def test_tool_with_arguments(self): + """Test tool status with arguments.""" + tool = ToolExecutionState( + name="query_db", + arguments={"query": "SELECT * FROM users", "limit": 10}, + start_time=time.time(), + ) + + result = render_tool_execution_status(tool, "⠹", elapsed=2.0) + + assert "⠹" in result + assert "Executing tool: query_db" in result + assert "(2.0s)" in result + # Should show args preview with pipe separator + assert "|" in result + assert "query=" in result or "limit=" in result + + def test_tool_without_arguments(self): + """Test tool status without arguments.""" + tool = ToolExecutionState( + name="ping", + arguments={}, + start_time=time.time(), + ) + + result = render_tool_execution_status(tool, "⠸", elapsed=0.5) + + assert "Executing tool: ping" in result + assert "(0.5s)" in result + # Should not have pipe separator when no args + assert "|" not in result + + def test_tool_with_many_arguments(self): + """Test tool with many arguments (should preview first 2).""" + tool = ToolExecutionState( + name="complex_tool", + arguments={"a": "1", "b": "2", "c": "3", "d": "4"}, + start_time=time.time(), + ) + + result = render_tool_execution_status(tool, "⠼", elapsed=1.0) + + assert "Executing tool: complex_tool" in result + # Should show first 4 args (a, b, c, d) + assert "a=1" in result + assert "b=2" in result + + +class TestShowFinalStreamingResponse: + """Tests for show_final_streaming_response function.""" + + def test_normal_response(self, capsys): + """Test showing normal (not interrupted) response.""" + 
show_final_streaming_response( + content="This is the final response", + elapsed=2.5, + interrupted=False, + ) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "🤖 Assistant" in output + assert "(2.5s)" in output + assert "This is the final response" in output + assert "interrupted" not in output.lower() + + def test_interrupted_response(self, capsys): + """Test showing interrupted response.""" + show_final_streaming_response( + content="Partial response", + elapsed=1.0, + interrupted=True, + ) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "interrupted" in output.lower() + assert "⚠" in output + + +class TestShowToolExecutionResult: + """Tests for show_tool_execution_result function.""" + + def test_successful_tool_with_result(self, capsys): + """Test showing successful tool execution with result.""" + tool = ToolExecutionState( + name="fetch_data", + arguments={}, + start_time=time.time(), + result="Data fetched successfully", + success=True, + elapsed=1.5, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "✓" in output + assert "fetch_data" in output + assert "1.5" in output or "1.50s" in output + assert "Data fetched successfully" in output + + def test_successful_tool_without_result(self, capsys): + """Test showing successful tool with no result.""" + tool = ToolExecutionState( + name="ping", + arguments={}, + start_time=time.time(), + result=None, + success=True, + elapsed=0.5, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "✓" in output + assert "ping" in output + assert "completed" in output + + def test_failed_tool_with_error(self, capsys): + """Test showing failed tool execution.""" + tool = ToolExecutionState( + name="broken_tool", + arguments={}, + start_time=time.time(), + result="Connection 
timeout", + success=False, + elapsed=3.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + # chuk-term error output may not preserve all formatting + # Just verify the error message is shown + assert "Error:" in output + assert "Connection timeout" in output + + def test_failed_tool_without_error(self, capsys): + """Test showing failed tool with no error message.""" + tool = ToolExecutionState( + name="mystery_fail", + arguments={}, + start_time=time.time(), + result=None, + success=False, + elapsed=1.0, + completed=True, + ) + + # Just verify function runs without error + # (chuk-term output may not be captured by capsys) + show_tool_execution_result(tool) + + # Function completed successfully + assert tool.completed is True + + def test_tool_with_long_result(self, capsys): + """Test that long results are truncated to 200 chars.""" + long_result = "x" * 300 + + tool = ToolExecutionState( + name="big_tool", + arguments={}, + start_time=time.time(), + result=long_result, + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + # Result should be truncated + assert "..." 
in output + # Should not contain full 300 chars + assert output.count("x") < 250 diff --git a/tests/display/test_renderers_coverage.py b/tests/display/test_renderers_coverage.py new file mode 100644 index 00000000..13cbf3e4 --- /dev/null +++ b/tests/display/test_renderers_coverage.py @@ -0,0 +1,532 @@ +"""Additional tests for renderers module to increase coverage.""" + +import json +import time + +from mcp_cli.chat.models import ToolExecutionState +from mcp_cli.display.renderers import ( + _sanitize_for_display, + show_tool_execution_result, +) + + +class TestSanitizeForDisplay: + """Tests for _sanitize_for_display function.""" + + def test_sanitize_newlines(self): + """Test sanitizing newline characters.""" + result = _sanitize_for_display("line1\nline2\nline3") + assert "\\n" in result + assert "\n" not in result + + def test_sanitize_carriage_return(self): + """Test sanitizing carriage return characters.""" + result = _sanitize_for_display("text\rmore") + assert "\\r" in result + assert "\r" not in result + + def test_sanitize_crlf(self): + """Test sanitizing CRLF sequences.""" + result = _sanitize_for_display("line1\r\nline2") + assert "\\r\\n" in result + + def test_sanitize_tab(self): + """Test sanitizing tab characters.""" + result = _sanitize_for_display("col1\tcol2") + assert "\\t" in result + assert "\t" not in result + + def test_sanitize_escape(self): + """Test sanitizing ESC character.""" + result = _sanitize_for_display("text\x1b[31mred") + assert "\\x1b" in result + assert "\x1b" not in result + + def test_sanitize_clean_text(self): + """Test that clean text is unchanged.""" + clean = "Hello world! 
123" + result = _sanitize_for_display(clean) + assert result == clean + + def test_sanitize_mixed_control_chars(self): + """Test sanitizing mixed control characters.""" + result = _sanitize_for_display("a\nb\tc\rd") + assert "\\n" in result + assert "\\t" in result + assert "\\r" in result + + +class TestShowToolExecutionResultCoverage: + """Additional tests for show_tool_execution_result to cover all branches.""" + + def test_result_dict_with_many_keys(self, capsys): + """Test result with dict having more than 5 keys.""" + # Lines 157-158 - dict with more than 5 keys + result_data = {f"key{i}": f"value{i}" for i in range(10)} + + tool = ToolExecutionState( + name="dict_tool", + arguments={}, + start_time=time.time(), + result=json.dumps(result_data), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + # Should mention total keys + assert "10 keys total" in output + + def test_result_dict_with_few_keys(self, capsys): + """Test result with dict having 5 or fewer keys.""" + result_data = {"a": 1, "b": 2, "c": 3} + + tool = ToolExecutionState( + name="small_dict", + arguments={}, + start_time=time.time(), + result=json.dumps(result_data), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + # Should show keys without "total" + assert "keys total" not in output + + def test_result_dict_with_nested_values(self, capsys): + """Test result dict with nested dict/list values.""" + # Lines 163-169 - nested values with JSON formatting + result_data = { + "nested_dict": {"inner": "value"}, + "nested_list": [1, 2, 3], + "simple": "string", + } + + tool = ToolExecutionState( + name="nested_tool", + arguments={}, + start_time=time.time(), + result=json.dumps(result_data), + success=True, + elapsed=1.0, + completed=True, + ) + + 
show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "nested_dict" in output or "nested_list" in output + + def test_result_dict_with_long_values(self, capsys): + """Test result dict with values that need truncation.""" + # Lines 165-166 - value truncation + result_data = {"long_value": "x" * 100} + + tool = ToolExecutionState( + name="long_value_tool", + arguments={}, + start_time=time.time(), + result=json.dumps(result_data), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + # Should be truncated + assert "..." in output + + def test_result_dict_with_control_chars_in_values(self, capsys): + """Test result dict with control characters in values.""" + # Line 168 - sanitization of values + result_data = {"message": "line1\nline2\ttab"} + + tool = ToolExecutionState( + name="control_chars_tool", + arguments={}, + start_time=time.time(), + result=json.dumps(result_data), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + # Should have sanitized control chars + assert "\\n" in output or "\\t" in output + + def test_result_list(self, capsys): + """Test result that is a JSON list.""" + # Lines 173-188 - list handling + result_data = ["item1", "item2", "item3", "item4", "item5"] + + tool = ToolExecutionState( + name="list_tool", + arguments={}, + start_time=time.time(), + result=json.dumps(result_data), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "5 items" in output + assert "[0]" in output # First item index + + def test_result_list_with_many_items(self, capsys): + """Test result list with more than 3 items shows 'more' message.""" + # Lines 
187-188 - "and X more" message + result_data = list(range(10)) + + tool = ToolExecutionState( + name="big_list", + arguments={}, + start_time=time.time(), + result=json.dumps(result_data), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "7 more" in output + + def test_result_list_with_nested_items(self, capsys): + """Test result list with nested dict/list items.""" + # Lines 177-180 - nested item formatting + result_data = [{"key": "value"}, [1, 2, 3], "simple"] + + tool = ToolExecutionState( + name="nested_list", + arguments={}, + start_time=time.time(), + result=json.dumps(result_data), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "[0]" in output + + def test_result_list_with_long_items(self, capsys): + """Test result list with items that need truncation.""" + # Lines 182-183 - item truncation + result_data = ["x" * 100] + + tool = ToolExecutionState( + name="long_item_list", + arguments={}, + start_time=time.time(), + result=json.dumps(result_data), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "..." 
in output + + def test_result_list_with_control_chars(self, capsys): + """Test result list with control chars in items.""" + # Lines 184-186 - sanitization + result_data = ["line1\nline2"] + + tool = ToolExecutionState( + name="control_list", + arguments={}, + start_time=time.time(), + result=json.dumps(result_data), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "\\n" in output + + def test_result_simple_json_value(self, capsys): + """Test result that is a simple JSON value (string, number, etc.).""" + # Lines 189-194 - simple value handling + tool = ToolExecutionState( + name="simple_tool", + arguments={}, + start_time=time.time(), + result=json.dumps("just a string"), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "just a string" in output + + def test_result_simple_json_long_value(self, capsys): + """Test simple JSON value that needs truncation.""" + # Lines 192-193 - long simple value + tool = ToolExecutionState( + name="long_simple", + arguments={}, + start_time=time.time(), + result=json.dumps("x" * 300), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "..." 
in output + # Should be truncated to ~200 chars + assert output.count("x") < 250 + + def test_result_json_number(self, capsys): + """Test result that is a JSON number.""" + tool = ToolExecutionState( + name="number_tool", + arguments={}, + start_time=time.time(), + result=json.dumps(42), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "42" in output + + def test_result_json_boolean(self, capsys): + """Test result that is a JSON boolean.""" + tool = ToolExecutionState( + name="bool_tool", + arguments={}, + start_time=time.time(), + result=json.dumps(True), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "True" in output + + def test_result_invalid_json(self, capsys): + """Test result that is not valid JSON.""" + # Lines 195-200 - JSONDecodeError handling + tool = ToolExecutionState( + name="invalid_json", + arguments={}, + start_time=time.time(), + result="not valid json {{{", + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + # Should show as plain string + assert "not valid json" in output + + def test_result_plain_string_long(self, capsys): + """Test plain string result that needs truncation.""" + # Lines 198-199 - long non-JSON string + tool = ToolExecutionState( + name="long_plain", + arguments={}, + start_time=time.time(), + result="x" * 300, + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "..." 
in output + + def test_result_plain_string_with_control_chars(self, capsys): + """Test plain string with control characters.""" + # Line 197 - sanitization of non-JSON + tool = ToolExecutionState( + name="control_plain", + arguments={}, + start_time=time.time(), + result="text\nwith\tnewlines\rand\ttabs", + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "\\n" in output or "\\t" in output + + def test_failed_tool_with_control_chars_in_error(self, capsys): + """Test failed tool with control chars in error message.""" + # Lines 204-206 - error sanitization + tool = ToolExecutionState( + name="failed_tool", + arguments={}, + start_time=time.time(), + result="Error:\nStack trace\nline 2", + success=False, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + # Error message should have sanitized newlines + assert "Error:" in output + assert "\\n" in output + + def test_result_empty_dict(self, capsys): + """Test result that is an empty dict.""" + tool = ToolExecutionState( + name="empty_dict", + arguments={}, + start_time=time.time(), + result=json.dumps({}), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + # Should not raise + captured = capsys.readouterr() + assert "empty_dict" in (captured.out + captured.err) + + def test_result_empty_list(self, capsys): + """Test result that is an empty list.""" + tool = ToolExecutionState( + name="empty_list", + arguments={}, + start_time=time.time(), + result=json.dumps([]), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "0 items" in output + + def test_result_null_json(self, capsys): + """Test result that is JSON null.""" + tool = 
ToolExecutionState( + name="null_tool", + arguments={}, + start_time=time.time(), + result=json.dumps(None), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + assert "None" in output + + def test_result_list_exactly_3_items(self, capsys): + """Test list with exactly 3 items (no 'more' message).""" + result_data = ["a", "b", "c"] + + tool = ToolExecutionState( + name="three_items", + arguments={}, + start_time=time.time(), + result=json.dumps(result_data), + success=True, + elapsed=1.0, + completed=True, + ) + + show_tool_execution_result(tool) + + captured = capsys.readouterr() + output = captured.out + captured.err + + # Should NOT have "more" message + assert "more" not in output.lower() + assert "[0]" in output + assert "[1]" in output + assert "[2]" in output diff --git a/tests/llm/test_system_prompt_generator.py b/tests/llm/test_system_prompt_generator.py deleted file mode 100644 index 67739dfd..00000000 --- a/tests/llm/test_system_prompt_generator.py +++ /dev/null @@ -1,113 +0,0 @@ -# test/llm/test_system_prompt_generator.py -import json -import pytest - -# SystemPromptGenerator tests -from mcp_cli.llm.system_prompt_generator import SystemPromptGenerator - - -class TestSystemPromptGenerator: - """Unit‑tests for the SystemPromptGenerator class.""" - - @pytest.fixture(scope="function") - def tools_schema(self): - """Simple tools JSON schema used across tests.""" - return { - "tools": [ - { - "name": "echo", - "description": "Return whatever text you pass in", - "parameters": { - "type": "object", - "properties": {"text": {"type": "string"}}, - "required": ["text"], - }, - } - ] - } - - def test_prompt_contains_json_schema(self, tools_schema): - """Generated prompt should embed the tools JSON schema verbatim.""" - gen = SystemPromptGenerator() - prompt = gen.generate_prompt(tools_schema) - pretty_schema = json.dumps(tools_schema, indent=2) - 
assert pretty_schema in prompt - - def test_default_placeholders_replaced(self, tools_schema): - """All template placeholders must be substituted and defaults used when - optional args are omitted.""" - gen = SystemPromptGenerator() - prompt = gen.generate_prompt(tools_schema) - - # Defaults must appear - assert gen.default_user_system_prompt in prompt - assert gen.default_tool_config in prompt - - # No double‑braces placeholders should remain - assert "{{" not in prompt and "}}" not in prompt - - def test_custom_overrides(self, tools_schema): - """Caller‑supplied user prompt & tool config should override defaults.""" - gen = SystemPromptGenerator() - user_prompt = "You are Jarvis, a helpful assistant." - tool_cfg = "All network calls must go through the proxy." - prompt = gen.generate_prompt( - tools_schema, user_system_prompt=user_prompt, tool_config=tool_cfg - ) - - assert user_prompt in prompt - assert tool_cfg in prompt - # Defaults should no longer be present - assert gen.default_user_system_prompt not in prompt - assert gen.default_tool_config not in prompt - - -from mcp_cli.tools.manager import ToolManager # noqa: E402 - - -class TestFormatToolResponse: - """Unit‑tests for the ToolManager.format_tool_response static method.""" - - def test_text_record_list(self): - """List of text records should be flattened to line‑separated string.""" - records = [ - {"type": "text", "text": "Hello"}, - {"type": "text", "text": "World"}, - ] - out = ToolManager.format_tool_response(records) - assert out == "Hello\nWorld" - - def test_text_record_missing_field(self): - """Missing 'text' field should gracefully substitute placeholder.""" - records = [ - {"type": "text"}, - ] - out = ToolManager.format_tool_response(records) - assert out == "" # Empty string when no text field - - def test_data_record_list_serialised_to_json(self): - """Non‑text dict list should be preserved via JSON stringification.""" - rows = [{"id": 1, "name": "Alice"}, {"id": 2, "name": "Bob"}] - out = 
ToolManager.format_tool_response(rows) - - # Debug output if needed - print(f"DEBUG: Input: {rows}") - print(f"DEBUG: Output: '{out}' (len: {len(out)})") - - # Check that we got some output - assert out.strip(), f"Expected non-empty output, got: '{out}'" - - # Must be valid JSON and round‑trip equal - parsed = json.loads(out) - assert parsed == rows - - def test_single_dict_serialised(self): - data = {"status": "ok"} - out = ToolManager.format_tool_response(data) - parsed = json.loads(out) - assert parsed == data - - @pytest.mark.parametrize("scalar", [42, 3.14, True, None, "plain text"]) - def test_scalar_converted_to_string(self, scalar): - out = ToolManager.format_tool_response(scalar) - assert out == str(scalar) diff --git a/tests/model_management/test_model_manager.py b/tests/model_management/test_model_manager.py new file mode 100644 index 00000000..f9415085 --- /dev/null +++ b/tests/model_management/test_model_manager.py @@ -0,0 +1,726 @@ +# tests/model_management/test_model_manager.py +"""Tests for ModelManager class.""" + +from __future__ import annotations + +from unittest.mock import MagicMock, patch + +import pytest + +from mcp_cli.config.defaults import DEFAULT_PROVIDER +from mcp_cli.model_management.model_manager import ModelManager +from mcp_cli.model_management.provider import RuntimeProviderConfig +from mcp_cli.model_management.provider_discovery import ( + DiscoveryResult, + ProviderDiscovery, +) + + +class TestModelManagerInit: + """Tests for ModelManager initialization.""" + + @patch("mcp_cli.model_management.model_manager.ModelManager._initialize_chuk_llm") + @patch("mcp_cli.model_management.model_manager.ModelManager._load_custom_providers") + def test_init_calls_initialization_methods( + self, mock_load_custom: MagicMock, mock_init_chuk: MagicMock + ) -> None: + """Test that __init__ calls initialization methods.""" + ModelManager() # Just instantiate, don't need the reference + mock_init_chuk.assert_called_once() + 
mock_load_custom.assert_called_once() + + @patch.object(ModelManager, "_load_custom_providers") + def test_initialize_chuk_llm_success(self, mock_load: MagicMock) -> None: + """Test successful chuk_llm initialization.""" + mock_config = MagicMock() + + with patch("chuk_llm.configuration.get_config", return_value=mock_config): + manager = ModelManager() + + assert manager._chuk_config == mock_config + assert manager._active_provider == DEFAULT_PROVIDER + + @patch.object(ModelManager, "_load_custom_providers") + def test_initialize_chuk_llm_failure(self, mock_load: MagicMock) -> None: + """Test chuk_llm initialization failure falls back gracefully.""" + with patch( + "chuk_llm.configuration.get_config", + side_effect=Exception("Config error"), + ): + manager = ModelManager() + + assert manager._chuk_config is None + assert manager._active_provider == DEFAULT_PROVIDER + assert manager._active_model is None + + +class TestLoadCustomProviders: + """Tests for loading custom providers from preferences.""" + + def test_load_custom_providers_success(self) -> None: + """Test loading custom providers from preferences.""" + mock_prefs = MagicMock() + mock_prefs.get_custom_providers.return_value = { + "my-provider": { + "api_base": "http://localhost:8080", + "models": ["model1", "model2"], + "default_model": "model1", + } + } + + with patch.object(ModelManager, "_initialize_chuk_llm"): + with patch( + "mcp_cli.utils.preferences.get_preference_manager", + return_value=mock_prefs, + ): + manager = ModelManager() + + assert "my-provider" in manager._custom_providers + config = manager._custom_providers["my-provider"] + assert config.api_base == "http://localhost:8080" + assert config.models == ["model1", "model2"] + + def test_load_custom_providers_failure(self) -> None: + """Test that provider loading failure is handled gracefully.""" + with patch.object(ModelManager, "_initialize_chuk_llm"): + with patch( + "mcp_cli.utils.preferences.get_preference_manager", + 
side_effect=Exception("Prefs error"), + ): + # Should not raise + manager = ModelManager() + assert manager._custom_providers == {} + + +class TestProviderManagement: + """Tests for provider management methods.""" + + @pytest.fixture + def manager(self) -> ModelManager: + """Create a ModelManager with mocked initialization.""" + with patch.object(ModelManager, "_initialize_chuk_llm"): + with patch.object(ModelManager, "_load_custom_providers"): + mgr = ModelManager() + mgr._chuk_config = None + mgr._active_provider = "ollama" + mgr._custom_providers = {} + return mgr + + def test_get_available_providers_with_chuk_config( + self, manager: ModelManager + ) -> None: + """Test getting providers when chuk_config is available.""" + mock_config = MagicMock() + mock_config.get_all_providers.return_value = ["anthropic", "ollama", "openai"] + manager._chuk_config = mock_config + + providers = manager.get_available_providers() + + # Providers should be sorted alphabetically + assert providers == ["anthropic", "ollama", "openai"] + + def test_get_available_providers_without_chuk_config( + self, manager: ModelManager + ) -> None: + """Test getting providers when chuk_config is None.""" + manager._chuk_config = None + + providers = manager.get_available_providers() + + # Should return safe fallback (configured default provider) + assert providers == [DEFAULT_PROVIDER] + + def test_get_available_providers_includes_custom( + self, manager: ModelManager + ) -> None: + """Test that custom providers are included in the list.""" + manager._custom_providers = { + "my-custom": RuntimeProviderConfig( + name="my-custom", + api_base="http://localhost:8080", + models=["model1"], + ) + } + + providers = manager.get_available_providers() + + assert "my-custom" in providers + + def test_get_available_providers_handles_exception( + self, manager: ModelManager + ) -> None: + """Test that exceptions are handled gracefully.""" + mock_config = MagicMock() + mock_config.get_all_providers.side_effect 
= Exception("Config error") + manager._chuk_config = mock_config + + providers = manager.get_available_providers() + + # Should return safe fallback (configured default provider) + assert providers == [DEFAULT_PROVIDER] + + def test_add_runtime_provider_with_models(self, manager: ModelManager) -> None: + """Test adding a runtime provider with known models.""" + config = manager.add_runtime_provider( + name="my-runtime", + api_base="http://localhost:8080", + api_key="test-key", + models=["model1", "model2"], + ) + + assert config.name == "my-runtime" + assert config.api_base == "http://localhost:8080" + assert config.models == ["model1", "model2"] + assert config.is_runtime is True + assert "my-runtime" in manager._custom_providers + + def test_add_runtime_provider_discovers_models(self, manager: ModelManager) -> None: + """Test adding a runtime provider triggers model discovery.""" + mock_result = DiscoveryResult( + provider="my-runtime", + api_base="http://localhost:8080", + success=True, + models=["discovered-model1", "discovered-model2"], + ) + + with patch.object( + ProviderDiscovery, + "discover_models_from_api", + return_value=mock_result, + ): + config = manager.add_runtime_provider( + name="my-runtime", + api_base="http://localhost:8080", + api_key="test-key", + ) + + assert config.is_runtime is True + assert "discovered-model1" in config.models + + def test_add_runtime_provider_discovery_fails(self, manager: ModelManager) -> None: + """Test adding provider when discovery fails.""" + mock_result = DiscoveryResult( + provider="my-runtime", + api_base="http://localhost:8080", + success=False, + error="Connection refused", + ) + + with patch( + "mcp_cli.model_management.model_manager.ProviderDiscovery.discover_models_from_api", + return_value=mock_result, + ): + config = manager.add_runtime_provider( + name="my-runtime", + api_base="http://localhost:8080", + api_key="test-key", + ) + + assert config.models == [] + + def test_is_custom_provider(self, manager: 
ModelManager) -> None: + """Test is_custom_provider method.""" + manager._custom_providers = { + "custom": RuntimeProviderConfig( + name="custom", api_base="http://localhost", models=[] + ) + } + + assert manager.is_custom_provider("custom") is True + assert manager.is_custom_provider("ollama") is False + + def test_is_runtime_provider(self, manager: ModelManager) -> None: + """Test is_runtime_provider method.""" + manager._custom_providers = { + "runtime": RuntimeProviderConfig( + name="runtime", api_base="http://localhost", models=[], is_runtime=True + ), + "persisted": RuntimeProviderConfig( + name="persisted", + api_base="http://localhost", + models=[], + is_runtime=False, + ), + } + + assert manager.is_runtime_provider("runtime") is True + assert manager.is_runtime_provider("persisted") is False + assert manager.is_runtime_provider("nonexistent") is False + + +class TestModelManagement: + """Tests for model management methods.""" + + @pytest.fixture + def manager(self) -> ModelManager: + """Create a ModelManager with mocked initialization.""" + with patch.object(ModelManager, "_initialize_chuk_llm"): + with patch.object(ModelManager, "_load_custom_providers"): + mgr = ModelManager() + mgr._chuk_config = None + mgr._active_provider = "ollama" + mgr._custom_providers = {} + return mgr + + def test_get_available_models_no_provider(self, manager: ModelManager) -> None: + """Test getting models without specifying provider uses active.""" + manager._active_provider = None + + models = manager.get_available_models() + + assert models == [] + + def test_get_available_models_custom_provider(self, manager: ModelManager) -> None: + """Test getting models from custom provider.""" + manager._custom_providers = { + "custom": RuntimeProviderConfig( + name="custom", + api_base="http://localhost", + models=["model1", "model2"], + ) + } + + models = manager.get_available_models("custom") + + assert models == ["model1", "model2"] + + def 
test_get_available_models_no_chuk_config(self, manager: ModelManager) -> None: + """Test getting models when chuk_config is None.""" + manager._chuk_config = None + + models = manager.get_available_models("ollama") + + assert models == [] + + def test_get_available_models_with_chuk_config(self, manager: ModelManager) -> None: + """Test getting models from chuk_llm config.""" + mock_list_providers = MagicMock( + return_value={"ollama": {"models": ["llama2", "mistral"]}} + ) + + manager._chuk_config = MagicMock() + + with patch( + "chuk_llm.llm.client.list_available_providers", + mock_list_providers, + ): + models = manager.get_available_models("ollama") + + assert models == ["llama2", "mistral"] + + def test_get_available_models_provider_error(self, manager: ModelManager) -> None: + """Test getting models when provider has error.""" + mock_list_providers = MagicMock( + return_value={"ollama": {"error": "Connection refused"}} + ) + + manager._chuk_config = MagicMock() + + with patch( + "chuk_llm.llm.client.list_available_providers", + mock_list_providers, + ): + models = manager.get_available_models("ollama") + + assert models == [] + + def test_get_available_models_exception(self, manager: ModelManager) -> None: + """Test getting models handles exceptions.""" + manager._chuk_config = MagicMock() + + with patch( + "chuk_llm.llm.client.list_available_providers", + side_effect=Exception("API error"), + ): + models = manager.get_available_models("ollama") + + assert models == [] + + def test_get_default_model_custom_provider(self, manager: ModelManager) -> None: + """Test getting default model from custom provider.""" + manager._custom_providers = { + "custom": RuntimeProviderConfig( + name="custom", + api_base="http://localhost", + models=["model1", "model2"], + default_model="model1", + ) + } + + default = manager.get_default_model("custom") + + assert default == "model1" + + def test_get_default_model_custom_provider_no_default( + self, manager: ModelManager + ) -> 
None: + """Test getting default model from custom provider without explicit default.""" + manager._custom_providers = { + "custom": RuntimeProviderConfig( + name="custom", + api_base="http://localhost", + models=["model1", "model2"], + ) + } + + default = manager.get_default_model("custom") + + # Should return first model + assert default == "model1" + + def test_get_default_model_chuk_config(self, manager: ModelManager) -> None: + """Test getting default model from chuk_llm config.""" + mock_provider_config = MagicMock() + mock_provider_config.default_model = "llama2" + + mock_config = MagicMock() + mock_config.get_provider.return_value = mock_provider_config + manager._chuk_config = mock_config + + default = manager.get_default_model("ollama") + + assert default == "llama2" + + def test_get_default_model_fallback(self, manager: ModelManager) -> None: + """Test getting default model falls back to first available.""" + manager._chuk_config = None + + with patch.object(manager, "get_available_models", return_value=["model1"]): + default = manager.get_default_model("ollama") + + assert default == "model1" + + def test_get_default_model_no_models(self, manager: ModelManager) -> None: + """Test getting default model when no models available.""" + manager._chuk_config = None + + with patch.object(manager, "get_available_models", return_value=[]): + default = manager.get_default_model("ollama") + + assert default == "default" + + def test_get_default_model_exception(self, manager: ModelManager) -> None: + """Test getting default model handles exceptions.""" + mock_config = MagicMock() + mock_config.get_provider.side_effect = Exception("Config error") + manager._chuk_config = mock_config + + with patch.object(manager, "get_available_models", return_value=["fallback"]): + default = manager.get_default_model("ollama") + + assert default == "fallback" + + +class TestRefreshModels: + """Tests for refresh_models method.""" + + @pytest.fixture + def manager(self) -> 
ModelManager: + """Create a ModelManager with mocked initialization.""" + with patch.object(ModelManager, "_initialize_chuk_llm"): + with patch.object(ModelManager, "_load_custom_providers"): + mgr = ModelManager() + mgr._chuk_config = None + mgr._active_provider = "ollama" + mgr._custom_providers = {} + return mgr + + def test_refresh_models_custom_provider(self, manager: ModelManager) -> None: + """Test refreshing models for custom provider.""" + config = RuntimeProviderConfig( + name="custom", + api_base="http://localhost:8080", + api_key="test-key", + models=["model1"], + ) + manager._custom_providers = {"custom": config} + + with patch( + "mcp_cli.model_management.model_manager.ProviderDiscovery.refresh_provider_models", + return_value=2, + ): + count = manager.refresh_models("custom") + + assert count == 2 + + def test_refresh_models_custom_provider_failure( + self, manager: ModelManager + ) -> None: + """Test refreshing models for custom provider when it fails.""" + config = RuntimeProviderConfig( + name="custom", + api_base="http://localhost:8080", + models=["model1"], + ) + manager._custom_providers = {"custom": config} + + with patch( + "mcp_cli.model_management.model_manager.ProviderDiscovery.refresh_provider_models", + return_value=None, + ): + count = manager.refresh_models("custom") + + assert count == 0 + + def test_refresh_models_specific_provider(self, manager: ModelManager) -> None: + """Test refreshing models for a specific provider.""" + mock_refresh = MagicMock(return_value=["func1", "func2", "func3"]) + + with patch( + "chuk_llm.api.providers.refresh_provider_functions", + mock_refresh, + ): + count = manager.refresh_models("ollama") + + assert count == 3 + mock_refresh.assert_called_once_with("ollama") + + def test_refresh_models_uses_active_provider(self, manager: ModelManager) -> None: + """Test refreshing models uses active provider when None.""" + manager._active_provider = "anthropic" + mock_refresh = MagicMock(return_value=["func1", 
"func2"]) + + with patch( + "chuk_llm.api.providers.refresh_provider_functions", + mock_refresh, + ): + count = manager.refresh_models(None) + + assert count == 2 + mock_refresh.assert_called_once_with("anthropic") + + def test_refresh_models_openai_provider(self, manager: ModelManager) -> None: + """Test refreshing models for openai provider.""" + mock_refresh = MagicMock(return_value=["func1"]) + + with patch( + "chuk_llm.api.providers.refresh_provider_functions", + mock_refresh, + ): + count = manager.refresh_models("openai") + + assert count == 1 + mock_refresh.assert_called_once_with("openai") + + def test_refresh_models_exception(self, manager: ModelManager) -> None: + """Test refreshing models handles exceptions.""" + with patch( + "chuk_llm.api.providers.refresh_provider_functions", + side_effect=Exception("Refresh error"), + ): + count = manager.refresh_models("ollama") + + assert count == 0 + + +class TestActiveProviderModel: + """Tests for active provider/model management.""" + + @pytest.fixture + def manager(self) -> ModelManager: + """Create a ModelManager with mocked initialization.""" + with patch.object(ModelManager, "_initialize_chuk_llm"): + with patch.object(ModelManager, "_load_custom_providers"): + mgr = ModelManager() + mgr._chuk_config = None + mgr._active_provider = "ollama" + mgr._active_model = None + mgr._custom_providers = {} + return mgr + + def test_get_active_provider(self, manager: ModelManager) -> None: + """Test getting active provider.""" + manager._active_provider = "anthropic" + + assert manager.get_active_provider() == "anthropic" + + def test_get_active_provider_fallback(self, manager: ModelManager) -> None: + """Test getting active provider when None.""" + manager._active_provider = None + + assert manager.get_active_provider() == DEFAULT_PROVIDER + + def test_get_active_model(self, manager: ModelManager) -> None: + """Test getting active model.""" + manager._active_model = "llama2" + + assert manager.get_active_model() == 
"llama2" + + def test_get_active_model_resolves_default(self, manager: ModelManager) -> None: + """Test getting active model resolves default when None.""" + manager._active_model = None + + with patch.object(manager, "get_default_model", return_value="default-model"): + model = manager.get_active_model() + + assert model == "default-model" + assert manager._active_model == "default-model" + + def test_set_active_provider(self, manager: ModelManager) -> None: + """Test setting active provider.""" + manager.set_active_provider("openai") + + assert manager._active_provider == "openai" + + def test_switch_provider(self, manager: ModelManager) -> None: + """Test switching provider.""" + with patch.object(manager, "get_default_model", return_value="gpt-4"): + manager.switch_provider("openai") + + assert manager._active_provider == "openai" + assert manager._active_model == "gpt-4" + + def test_switch_model(self, manager: ModelManager) -> None: + """Test switching to specific provider and model.""" + manager.switch_model("anthropic", "claude-3") + + assert manager._active_provider == "anthropic" + assert manager._active_model == "claude-3" + + +class TestClientManagement: + """Tests for client management.""" + + @pytest.fixture + def manager(self) -> ModelManager: + """Create a ModelManager with mocked initialization.""" + with patch.object(ModelManager, "_initialize_chuk_llm"): + with patch.object(ModelManager, "_load_custom_providers"): + mgr = ModelManager() + mgr._chuk_config = None + mgr._active_provider = "ollama" + mgr._active_model = "llama2" + mgr._custom_providers = {} + mgr._client_factory = MagicMock() + return mgr + + def test_get_client_uses_active(self, manager: ModelManager) -> None: + """Test get_client uses active provider/model when not specified.""" + mock_client = MagicMock() + manager._client_factory.get_client.return_value = mock_client + + client = manager.get_client() + + manager._client_factory.get_client.assert_called_once_with( + "ollama", 
"llama2", chuk_config=None + ) + assert client == mock_client + + def test_get_client_specified_provider(self, manager: ModelManager) -> None: + """Test get_client with specified provider/model.""" + mock_client = MagicMock() + manager._client_factory.get_client.return_value = mock_client + + client = manager.get_client("openai", "gpt-4") + + manager._client_factory.get_client.assert_called_once_with( + "openai", "gpt-4", chuk_config=None + ) + assert client == mock_client + + def test_get_client_custom_provider(self, manager: ModelManager) -> None: + """Test get_client for custom provider.""" + config = RuntimeProviderConfig( + name="custom", api_base="http://localhost", models=["model1"] + ) + manager._custom_providers = {"custom": config} + mock_client = MagicMock() + manager._client_factory.get_client.return_value = mock_client + + client = manager.get_client("custom", "model1") + + manager._client_factory.get_client.assert_called_once_with( + "custom", "model1", config=config + ) + assert client == mock_client + + def test_get_client_no_provider_raises(self, manager: ModelManager) -> None: + """Test get_client raises when no provider available.""" + manager._active_provider = None + + with pytest.raises(ValueError, match="No provider specified"): + manager.get_client() + + +class TestValidation: + """Tests for validation methods.""" + + @pytest.fixture + def manager(self) -> ModelManager: + """Create a ModelManager with mocked initialization.""" + with patch.object(ModelManager, "_initialize_chuk_llm"): + with patch.object(ModelManager, "_load_custom_providers"): + mgr = ModelManager() + mgr._chuk_config = None + mgr._active_provider = "ollama" + mgr._custom_providers = {} + return mgr + + def test_validate_provider_valid(self, manager: ModelManager) -> None: + """Test validate_provider for valid provider.""" + with patch.object( + manager, "get_available_providers", return_value=["ollama", "openai"] + ): + assert manager.validate_provider("ollama") is True + 
+ def test_validate_provider_invalid(self, manager: ModelManager) -> None: + """Test validate_provider for invalid provider.""" + with patch.object( + manager, "get_available_providers", return_value=["ollama", "openai"] + ): + assert manager.validate_provider("invalid") is False + + def test_validate_model_valid(self, manager: ModelManager) -> None: + """Test validate_model for valid model.""" + with patch.object( + manager, "get_available_models", return_value=["llama2", "mistral"] + ): + assert manager.validate_model("llama2") is True + + def test_validate_model_invalid(self, manager: ModelManager) -> None: + """Test validate_model for invalid model.""" + with patch.object( + manager, "get_available_models", return_value=["llama2", "mistral"] + ): + assert manager.validate_model("invalid") is False + + def test_validate_model_no_provider(self, manager: ModelManager) -> None: + """Test validate_model when no provider specified or active.""" + manager._active_provider = None + + assert manager.validate_model("llama2") is False + + +class TestUtilityMethods: + """Tests for utility methods.""" + + @pytest.fixture + def manager(self) -> ModelManager: + """Create a ModelManager with mocked initialization.""" + with patch.object(ModelManager, "_initialize_chuk_llm"): + with patch.object(ModelManager, "_load_custom_providers"): + mgr = ModelManager() + mgr._chuk_config = None + mgr._active_provider = "ollama" + mgr._active_model = "llama2" + mgr._custom_providers = {} + mgr._client_factory = MagicMock() + return mgr + + def test_str(self, manager: ModelManager) -> None: + """Test __str__ method.""" + result = str(manager) + assert "ModelManager" in result + assert "ollama" in result + assert "llama2" in result + + def test_repr(self, manager: ModelManager) -> None: + """Test __repr__ method.""" + manager._client_factory.get_cache_size.return_value = 2 + + result = repr(manager) + + assert "ModelManager" in result + assert "ollama" in result + assert "llama2" in result + 
assert "cached_clients=2" in result diff --git a/tests/test_command_consistency.py b/tests/test_command_consistency.py index 902d18d6..c2a21cf2 100644 --- a/tests/test_command_consistency.py +++ b/tests/test_command_consistency.py @@ -89,7 +89,7 @@ async def test_interactive_adapter_preserves_quotes(self): command = 'exec echo_text \'{"message": "hello world"}\'' with patch( - "mcp_cli.commands.definitions.execute_tool.ExecuteToolCommand.execute" + "mcp_cli.commands.tools.execute_tool.ExecuteToolCommand.execute" ) as mock_exec: from mcp_cli.commands.base import CommandResult @@ -124,7 +124,7 @@ def test_all_modes_support_slash_commands(self): @pytest.mark.asyncio async def test_execute_command_json_validation(self): """Test that execute command validates JSON parameters correctly.""" - from mcp_cli.commands.definitions.execute_tool import ExecuteToolCommand + from mcp_cli.commands.tools.execute_tool import ExecuteToolCommand cmd = ExecuteToolCommand() @@ -156,7 +156,7 @@ async def test_execute_command_json_validation(self): @pytest.mark.asyncio async def test_execute_command_error_handling(self): """Test that execute command provides helpful error messages.""" - from mcp_cli.commands.definitions.execute_tool import ExecuteToolCommand + from mcp_cli.commands.tools.execute_tool import ExecuteToolCommand cmd = ExecuteToolCommand() @@ -175,9 +175,7 @@ async def test_execute_command_error_handling(self): mock_tool_manager.get_all_tools = AsyncMock(return_value=[mock_tool]) # Test with plain string (common mistake) - with patch( - "mcp_cli.commands.definitions.execute_tool.output.error" - ) as mock_error: + with patch("mcp_cli.commands.tools.execute_tool.output.error") as mock_error: result = await cmd.execute( tool_manager=mock_tool_manager, tool="echo_text", diff --git a/tests/tools/test_config_loader.py b/tests/tools/test_config_loader.py new file mode 100644 index 00000000..6f0a424e --- /dev/null +++ b/tests/tools/test_config_loader.py @@ -0,0 +1,686 @@ +# 
tests/tools/test_config_loader.py +"""Tests for MCP configuration loading.""" + +import json +import pytest +from unittest.mock import AsyncMock, MagicMock, patch + +from mcp_cli.tools.config_loader import ConfigLoader +from mcp_cli.config.server_models import HTTPServerConfig, STDIOServerConfig + + +@pytest.fixture +def temp_config_file(tmp_path): + """Create a temporary config file.""" + config = { + "mcpServers": { + "http_server": { + "url": "https://example.com/mcp", + "headers": {"Authorization": "Bearer token"}, + }, + "sse_server": { + "url": "https://example.com/sse", + "transport": "sse", + }, + "stdio_server": { + "command": "python", + "args": ["-m", "server"], + "env": {"DEBUG": "1"}, + }, + } + } + config_path = tmp_path / "mcp_config.json" + config_path.write_text(json.dumps(config)) + return str(config_path) + + +@pytest.fixture +def token_config_file(tmp_path): + """Create a config file with token placeholders.""" + config = { + "mcpServers": { + "oauth_server": { + "url": "https://api.example.com", + "headers": {"Authorization": "{{token:github}}"}, + } + } + } + config_path = tmp_path / "token_config.json" + config_path.write_text(json.dumps(config)) + return str(config_path) + + +# ---------------------------------------------------------------------------- +# ConfigLoader initialization tests +# ---------------------------------------------------------------------------- + + +def test_config_loader_init(): + """Test ConfigLoader initialization.""" + loader = ConfigLoader("config.json", ["server1", "server2"]) + + assert loader.config_file == "config.json" + assert loader.servers == ["server1", "server2"] + assert loader.http_servers == [] + assert loader.sse_servers == [] + assert loader.stdio_servers == [] + + +# ---------------------------------------------------------------------------- +# Config loading tests +# ---------------------------------------------------------------------------- + + +def test_load_config(temp_config_file): + 
"""Test loading a valid config file.""" + loader = ConfigLoader(temp_config_file, ["http_server"]) + + config = loader.load() + + assert "mcpServers" in config + assert "http_server" in config["mcpServers"] + + +def test_load_config_caches_result(temp_config_file): + """Test config is cached after first load.""" + loader = ConfigLoader(temp_config_file, ["http_server"]) + + config1 = loader.load() + config2 = loader.load() + + assert config1 is config2 + + +def test_load_config_file_not_found(): + """Test loading nonexistent config file.""" + loader = ConfigLoader("/nonexistent/config.json", []) + + config = loader.load() + + assert config == {} + + +def test_load_config_invalid_json(tmp_path): + """Test loading invalid JSON file.""" + config_path = tmp_path / "invalid.json" + config_path.write_text("not valid json {") + + loader = ConfigLoader(str(config_path), []) + config = loader.load() + + assert config == {} + + +# ---------------------------------------------------------------------------- +# Server type detection tests +# ---------------------------------------------------------------------------- + + +def test_detect_server_types_http(temp_config_file): + """Test detecting HTTP servers.""" + loader = ConfigLoader(temp_config_file, ["http_server"]) + config = loader.load() + + loader.detect_server_types(config) + + assert len(loader.http_servers) == 1 + assert len(loader.sse_servers) == 0 + assert len(loader.stdio_servers) == 0 + + http_server = loader.http_servers[0] + assert isinstance(http_server, HTTPServerConfig) + assert http_server.name == "http_server" + assert http_server.url == "https://example.com/mcp" + assert http_server.headers == {"Authorization": "Bearer token"} + + +def test_detect_server_types_sse(temp_config_file): + """Test detecting SSE servers.""" + loader = ConfigLoader(temp_config_file, ["sse_server"]) + config = loader.load() + + loader.detect_server_types(config) + + assert len(loader.http_servers) == 0 + assert 
len(loader.sse_servers) == 1 + assert len(loader.stdio_servers) == 0 + + sse_server = loader.sse_servers[0] + assert isinstance(sse_server, HTTPServerConfig) + assert sse_server.name == "sse_server" + + +def test_detect_server_types_stdio(temp_config_file): + """Test detecting STDIO servers.""" + loader = ConfigLoader(temp_config_file, ["stdio_server"]) + config = loader.load() + + loader.detect_server_types(config) + + assert len(loader.http_servers) == 0 + assert len(loader.sse_servers) == 0 + assert len(loader.stdio_servers) == 1 + + stdio_server = loader.stdio_servers[0] + assert isinstance(stdio_server, STDIOServerConfig) + assert stdio_server.name == "stdio_server" + assert stdio_server.command == "python" + assert stdio_server.args == ["-m", "server"] + assert stdio_server.env == {"DEBUG": "1"} + + +def test_detect_server_types_multiple(temp_config_file): + """Test detecting multiple server types.""" + loader = ConfigLoader( + temp_config_file, ["http_server", "sse_server", "stdio_server"] + ) + config = loader.load() + + loader.detect_server_types(config) + + assert len(loader.http_servers) == 1 + assert len(loader.sse_servers) == 1 + assert len(loader.stdio_servers) == 1 + + +def test_detect_server_types_unknown_server(temp_config_file): + """Test handling unknown server names.""" + loader = ConfigLoader(temp_config_file, ["unknown_server"]) + config = loader.load() + + loader.detect_server_types(config) + + assert len(loader.http_servers) == 0 + assert len(loader.sse_servers) == 0 + assert len(loader.stdio_servers) == 0 + + +def test_detect_server_types_clears_existing(temp_config_file): + """Test detect_server_types clears previous results.""" + loader = ConfigLoader(temp_config_file, ["http_server"]) + config = loader.load() + + # First detection + loader.detect_server_types(config) + assert len(loader.http_servers) == 1 + + # Second detection with different servers + loader.servers = ["stdio_server"] + loader.detect_server_types(config) + + assert 
len(loader.http_servers) == 0 + assert len(loader.stdio_servers) == 1 + + +def test_detect_server_types_disabled_server(tmp_path): + """Test detecting disabled servers.""" + config = { + "mcpServers": { + "disabled_server": { + "url": "https://example.com", + "disabled": True, + } + } + } + config_path = tmp_path / "config.json" + config_path.write_text(json.dumps(config)) + + loader = ConfigLoader(str(config_path), ["disabled_server"]) + loaded = loader.load() + loader.detect_server_types(loaded) + + assert len(loader.http_servers) == 1 + assert loader.http_servers[0].disabled is True + + +# ---------------------------------------------------------------------------- +# Token placeholder resolution tests +# ---------------------------------------------------------------------------- + + +def test_resolve_token_placeholder(token_config_file): + """Test resolving token placeholders.""" + loader = ConfigLoader(token_config_file, ["oauth_server"]) + + # Mock token store to return OAuth token data + stored_token_json = json.dumps( + { + "token_type": "oauth", + "name": "github", + "data": {"access_token": "test_access_token"}, + } + ) + + with patch.object( + loader._token_store, "_retrieve_raw", return_value=stored_token_json + ): + config = loader.load() + + assert ( + config["mcpServers"]["oauth_server"]["headers"]["Authorization"] + == "Bearer test_access_token" + ) + + +def test_resolve_token_placeholder_no_token(token_config_file): + """Test handling missing tokens.""" + loader = ConfigLoader(token_config_file, ["oauth_server"]) + + # Mock token store to return None (no token found) + with patch.object(loader._token_store, "_retrieve_raw", return_value=None): + config = loader.load() + + # Should keep placeholder if no token + assert ( + config["mcpServers"]["oauth_server"]["headers"]["Authorization"] + == "{{token:github}}" + ) + + +def test_resolve_token_placeholder_nested(tmp_path): + """Test resolving tokens in nested structures.""" + config = { + 
"mcpServers": {"server": {"nested": {"deep": {"token": "{{token:provider}}"}}}} + } + config_path = tmp_path / "config.json" + config_path.write_text(json.dumps(config)) + + loader = ConfigLoader(str(config_path), ["server"]) + + # Mock token store to return OAuth token data + stored_token_json = json.dumps( + { + "token_type": "oauth", + "name": "provider", + "data": {"access_token": "nested_token"}, + } + ) + + with patch.object( + loader._token_store, "_retrieve_raw", return_value=stored_token_json + ): + loaded = loader.load() + + assert ( + loaded["mcpServers"]["server"]["nested"]["deep"]["token"] + == "Bearer nested_token" + ) + + +def test_resolve_token_placeholder_in_list(tmp_path): + """Test resolving tokens in list values.""" + config = { + "mcpServers": { + "server": {"tokens": ["{{token:provider1}}", "{{token:provider2}}"]} + } + } + config_path = tmp_path / "config.json" + config_path.write_text(json.dumps(config)) + + loader = ConfigLoader(str(config_path), ["server"]) + + # Mock token store to return OAuth token data + stored_token_json = json.dumps( + { + "token_type": "oauth", + "name": "provider", + "data": {"access_token": "list_token"}, + } + ) + + with patch.object( + loader._token_store, "_retrieve_raw", return_value=stored_token_json + ): + loaded = loader.load() + + assert loaded["mcpServers"]["server"]["tokens"] == [ + "Bearer list_token", + "Bearer list_token", + ] + + +# ---------------------------------------------------------------------------- +# OAuth refresh callback tests +# ---------------------------------------------------------------------------- + + +@pytest.mark.asyncio +async def test_create_oauth_refresh_callback_no_url(): + """Test refresh callback with no URL.""" + loader = ConfigLoader("config.json", []) + + callback = loader.create_oauth_refresh_callback([], []) + result = await callback(server_url=None) + + assert result is None + + +@pytest.mark.asyncio +async def test_create_oauth_refresh_callback_unknown_url(): + 
"""Test refresh callback with unknown URL.""" + loader = ConfigLoader("config.json", []) + + http_servers = [HTTPServerConfig(name="server1", url="https://known.com")] + callback = loader.create_oauth_refresh_callback(http_servers, []) + + result = await callback(server_url="https://unknown.com") + + assert result is None + + +@pytest.mark.asyncio +async def test_create_oauth_refresh_callback_maps_url(): + """Test refresh callback correctly maps URL to server.""" + loader = ConfigLoader("config.json", []) + + http_servers = [ + HTTPServerConfig(name="test_server", url="https://api.example.com/mcp") + ] + + callback = loader.create_oauth_refresh_callback(http_servers, []) + + # Mock TokenStoreFactory to return a mock store with no token + with patch("mcp_cli.tools.config_loader.TokenStoreFactory") as MockFactory: + mock_store = MagicMock() + mock_store._retrieve_raw.return_value = None + MockFactory.create.return_value = mock_store + result = await callback(server_url="https://api.example.com/mcp") + + assert result is None # No token found + + +@pytest.mark.asyncio +async def test_create_oauth_refresh_callback_no_refresh_token(): + """Test callback when no refresh token is available.""" + loader = ConfigLoader("config.json", []) + + http_servers = [HTTPServerConfig(name="test_server", url="https://api.example.com")] + + callback = loader.create_oauth_refresh_callback(http_servers, []) + + # Return token data without refresh_token + stored_token_json = json.dumps( + { + "token_type": "oauth", + "name": "test_server", + "data": {"access_token": "old_token"}, # No refresh_token + } + ) + + with patch("mcp_cli.tools.config_loader.TokenStoreFactory") as MockFactory: + mock_store = MagicMock() + mock_store._retrieve_raw.return_value = stored_token_json + MockFactory.create.return_value = mock_store + + result = await callback(server_url="https://api.example.com") + + assert result is None + + +# ---------------------------------------------------------------------------- 
+# Additional coverage tests +# ---------------------------------------------------------------------------- + + +def test_load_config_general_exception(tmp_path): + """Test handling general exception during config load.""" + config_path = tmp_path / "config.json" + config_path.write_text('{"mcpServers": {}}') + + loader = ConfigLoader(str(config_path), []) + + # Mock json.load to raise a generic exception + with patch("builtins.open", side_effect=PermissionError("access denied")): + config = loader.load() + + assert config == {} + + +def test_resolve_token_placeholder_exception(tmp_path): + """Test handling exception during token resolution.""" + config = { + "mcpServers": { + "server": {"headers": {"Authorization": "{{token:failing_provider}}"}} + } + } + config_path = tmp_path / "config.json" + config_path.write_text(json.dumps(config)) + + loader = ConfigLoader(str(config_path), ["server"]) + + # Make _retrieve_raw raise an exception + with patch.object( + loader._token_store, "_retrieve_raw", side_effect=RuntimeError("token error") + ): + loaded = loader.load() + + # Should keep the placeholder on error + assert ( + loaded["mcpServers"]["server"]["headers"]["Authorization"] + == "{{token:failing_provider}}" + ) + + +@pytest.mark.asyncio +async def test_create_oauth_refresh_callback_success(): + """Test successful OAuth token refresh.""" + loader = ConfigLoader("config.json", []) + + http_servers = [HTTPServerConfig(name="test_server", url="https://api.example.com")] + + callback = loader.create_oauth_refresh_callback(http_servers, []) + + # Mock the full refresh flow + stored_token_json = json.dumps( + { + "token_type": "oauth", + "name": "test_server", + "data": { + "access_token": "old_token", + "refresh_token": "refresh_token_value", + }, + } + ) + + with ( + patch("mcp_cli.tools.config_loader.TokenStoreFactory") as MockFactory, + patch("mcp_cli.tools.config_loader.OAuthHandler") as MockOAuth, + ): + mock_store = MagicMock() + 
mock_store._retrieve_raw.return_value = stored_token_json + MockFactory.create.return_value = mock_store + + mock_oauth = MockOAuth.return_value + mock_oauth.refresh_access_token = AsyncMock( + return_value={ + "access_token": "new_access_token", + "refresh_token": "new_refresh_token", + } + ) + + result = await callback(server_url="https://api.example.com") + + assert result == {"Authorization": "Bearer new_access_token"} + mock_store._store_raw.assert_called_once() + + +@pytest.mark.asyncio +async def test_create_oauth_refresh_callback_refresh_fails(): + """Test OAuth token refresh when refresh returns empty.""" + loader = ConfigLoader("config.json", []) + + http_servers = [HTTPServerConfig(name="test_server", url="https://api.example.com")] + + callback = loader.create_oauth_refresh_callback(http_servers, []) + + stored_token_json = json.dumps( + { + "token_type": "oauth", + "name": "test_server", + "data": { + "access_token": "old_token", + "refresh_token": "refresh_token_value", + }, + } + ) + + with ( + patch("mcp_cli.tools.config_loader.TokenStoreFactory") as MockFactory, + patch("mcp_cli.tools.config_loader.OAuthHandler") as MockOAuth, + ): + mock_store = MagicMock() + mock_store._retrieve_raw.return_value = stored_token_json + MockFactory.create.return_value = mock_store + + mock_oauth = MockOAuth.return_value + mock_oauth.refresh_access_token = AsyncMock(return_value=None) + + result = await callback(server_url="https://api.example.com") + + assert result is None + + +@pytest.mark.asyncio +async def test_create_oauth_refresh_callback_exception(): + """Test OAuth token refresh when exception is raised.""" + loader = ConfigLoader("config.json", []) + + http_servers = [HTTPServerConfig(name="test_server", url="https://api.example.com")] + + callback = loader.create_oauth_refresh_callback(http_servers, []) + + stored_token_json = json.dumps( + { + "token_type": "oauth", + "name": "test_server", + "data": { + "access_token": "old_token", + "refresh_token": 
"refresh_token_value", + }, + } + ) + + with ( + patch("mcp_cli.tools.config_loader.TokenStoreFactory") as MockFactory, + patch("mcp_cli.tools.config_loader.OAuthHandler") as MockOAuth, + ): + mock_store = MagicMock() + mock_store._retrieve_raw.return_value = stored_token_json + MockFactory.create.return_value = mock_store + + mock_oauth = MockOAuth.return_value + mock_oauth.refresh_access_token = AsyncMock( + side_effect=RuntimeError("network error") + ) + + result = await callback(server_url="https://api.example.com") + + assert result is None + + +@pytest.mark.asyncio +async def test_create_oauth_refresh_callback_sse_server(): + """Test OAuth refresh callback finds server in SSE list.""" + loader = ConfigLoader("config.json", []) + + sse_servers = [HTTPServerConfig(name="sse_server", url="https://sse.example.com")] + + callback = loader.create_oauth_refresh_callback([], sse_servers) + + with patch("mcp_cli.tools.config_loader.TokenStoreFactory") as MockFactory: + mock_store = MagicMock() + mock_store._retrieve_raw.return_value = None + MockFactory.create.return_value = mock_store + + result = await callback(server_url="https://sse.example.com") + + assert result is None # No token found, but server was mapped + + +# ---------------------------------------------------------------------------- +# Async loading tests (load_async method) +# ---------------------------------------------------------------------------- + + +@pytest.mark.asyncio +async def test_load_async_success(temp_config_file): + """Test successful async config loading.""" + loader = ConfigLoader(temp_config_file, ["http_server"]) + + config = await loader.load_async() + + assert "mcpServers" in config + assert "http_server" in config["mcpServers"] + + +@pytest.mark.asyncio +async def test_load_async_caches_result(temp_config_file): + """Test async load caches result.""" + loader = ConfigLoader(temp_config_file, ["http_server"]) + + config1 = await loader.load_async() + config2 = await 
loader.load_async() + + assert config1 is config2 + + +@pytest.mark.asyncio +async def test_load_async_file_not_found(): + """Test async loading nonexistent config file.""" + loader = ConfigLoader("/nonexistent/config.json", []) + + config = await loader.load_async() + + assert config == {} + + +@pytest.mark.asyncio +async def test_load_async_invalid_json(tmp_path): + """Test async loading invalid JSON file.""" + config_path = tmp_path / "invalid.json" + config_path.write_text("not valid json {") + + loader = ConfigLoader(str(config_path), []) + config = await loader.load_async() + + assert config == {} + + +@pytest.mark.asyncio +async def test_load_async_general_exception(tmp_path): + """Test async handling general exception during config load.""" + config_path = tmp_path / "config.json" + config_path.write_text('{"mcpServers": {}}') + + loader = ConfigLoader(str(config_path), []) + + # Mock asyncio.to_thread to raise a generic exception + with patch("asyncio.to_thread", side_effect=PermissionError("access denied")): + config = await loader.load_async() + + assert config == {} + + +@pytest.mark.asyncio +async def test_load_async_resolves_tokens(token_config_file): + """Test async loading resolves token placeholders.""" + loader = ConfigLoader(token_config_file, ["oauth_server"]) + + # Mock token store to return OAuth token data + stored_token_json = json.dumps( + { + "token_type": "oauth", + "name": "github", + "data": {"access_token": "async_token"}, + } + ) + + with patch.object( + loader._token_store, "_retrieve_raw", return_value=stored_token_json + ): + config = await loader.load_async() + + assert ( + config["mcpServers"]["oauth_server"]["headers"]["Authorization"] + == "Bearer async_token" + ) diff --git a/tests/tools/test_dynamic_tools.py b/tests/tools/test_dynamic_tools.py new file mode 100644 index 00000000..709886a9 --- /dev/null +++ b/tests/tools/test_dynamic_tools.py @@ -0,0 +1,551 @@ +# tests/tools/test_dynamic_tools.py +"""Tests for dynamic tool 
discovery functionality.""" + +import pytest +from unittest.mock import AsyncMock + +from chuk_tool_processor.discovery import DynamicToolName +from mcp_cli.tools.dynamic_tools import DynamicToolProvider +from mcp_cli.tools.models import ToolInfo, ToolCallResult + + +class DummyToolManager: + """Mock tool manager for testing DynamicToolProvider.""" + + def __init__(self, tools: list[ToolInfo] | None = None): + self.tools = tools or [] + self.execute_tool = AsyncMock( + return_value=ToolCallResult( + tool_name="test_tool", + success=True, + result={"data": "test_result"}, + ) + ) + + async def get_all_tools(self) -> list[ToolInfo]: + return self.tools + + def format_tool_response(self, response): + if isinstance(response, dict): + import json + + return json.dumps(response) + return str(response) + + +# ---------------------------------------------------------------------------- +# DynamicToolName enum tests +# ---------------------------------------------------------------------------- + + +def test_dynamic_tool_name_values(): + """Verify enum values match expected tool names.""" + assert DynamicToolName.LIST_TOOLS.value == "list_tools" + assert DynamicToolName.SEARCH_TOOLS.value == "search_tools" + assert DynamicToolName.GET_TOOL_SCHEMA.value == "get_tool_schema" + assert DynamicToolName.CALL_TOOL.value == "call_tool" + + +def test_dynamic_tool_name_is_string_enum(): + """Ensure enum values can be used as strings.""" + assert isinstance(DynamicToolName.LIST_TOOLS.value, str) + assert DynamicToolName.LIST_TOOLS == "list_tools" + + +# ---------------------------------------------------------------------------- +# DynamicToolProvider tests +# ---------------------------------------------------------------------------- + + +@pytest.fixture +def sample_tools() -> list[ToolInfo]: + """Create sample tools for testing.""" + return [ + ToolInfo( + name="calculator", + namespace="math", + description="Performs mathematical calculations", + parameters={ + "type": "object", + 
"properties": { + "expression": {"type": "string", "description": "Math expression"}, + }, + "required": ["expression"], + }, + ), + ToolInfo( + name="weather", + namespace="api", + description="Gets current weather for a location", + parameters={ + "type": "object", + "properties": { + "city": {"type": "string", "description": "City name"}, + }, + "required": ["city"], + }, + ), + ToolInfo( + name="search", + namespace="web", + description="Searches the web for information", + parameters={ + "type": "object", + "properties": { + "query": {"type": "string", "description": "Search query"}, + }, + "required": ["query"], + }, + ), + ] + + +@pytest.fixture +def provider(sample_tools) -> DynamicToolProvider: + """Create a DynamicToolProvider with mock tool manager.""" + tool_manager = DummyToolManager(sample_tools) + return DynamicToolProvider(tool_manager) + + +def test_get_dynamic_tools_returns_all_tools(provider): + """Verify get_dynamic_tools returns all 5 dynamic tools.""" + tools = provider.get_dynamic_tools() + + assert len(tools) == 5 + + tool_names = {t["function"]["name"] for t in tools} + expected = { + DynamicToolName.LIST_TOOLS.value, + DynamicToolName.SEARCH_TOOLS.value, + DynamicToolName.GET_TOOL_SCHEMA.value, + DynamicToolName.GET_TOOL_SCHEMAS.value, + DynamicToolName.CALL_TOOL.value, + } + assert tool_names == expected + + +def test_get_dynamic_tools_format(provider): + """Verify dynamic tools follow OpenAI function format.""" + tools = provider.get_dynamic_tools() + + for tool in tools: + assert tool["type"] == "function" + assert "function" in tool + assert "name" in tool["function"] + assert "description" in tool["function"] + assert "parameters" in tool["function"] + + +def test_is_dynamic_tool_returns_true_for_dynamic_tools(provider): + """Verify is_dynamic_tool correctly identifies dynamic tools.""" + assert provider.is_dynamic_tool("list_tools") is True + assert provider.is_dynamic_tool("search_tools") is True + assert 
provider.is_dynamic_tool("get_tool_schema") is True + assert provider.is_dynamic_tool("call_tool") is True + + +def test_is_dynamic_tool_returns_false_for_regular_tools(provider): + """Verify is_dynamic_tool returns False for non-dynamic tools.""" + assert provider.is_dynamic_tool("calculator") is False + assert provider.is_dynamic_tool("weather") is False + assert provider.is_dynamic_tool("some_random_tool") is False + + +@pytest.mark.asyncio +async def test_list_tools(provider): + """Test list_tools returns tool summaries.""" + results = await provider.list_tools(limit=50) + + assert len(results) == 3 + names = {r["name"] for r in results} + assert names == {"calculator", "weather", "search"} + + # Check structure + for result in results: + assert "name" in result + assert "description" in result + assert "namespace" in result + + +@pytest.mark.asyncio +async def test_list_tools_respects_limit(provider): + """Test list_tools respects the limit parameter.""" + results = await provider.list_tools(limit=2) + assert len(results) == 2 + + +@pytest.mark.asyncio +async def test_search_tools_finds_matches(provider): + """Test search_tools finds tools matching query.""" + results = await provider.search_tools("calc", limit=10) + + assert len(results) >= 1 + assert any(r["name"] == "calculator" for r in results) + + +@pytest.mark.asyncio +async def test_search_tools_searches_descriptions(provider): + """Test search_tools searches in descriptions.""" + results = await provider.search_tools("weather", limit=10) + + assert len(results) >= 1 + assert any(r["name"] == "weather" for r in results) + + +@pytest.mark.asyncio +async def test_search_tools_respects_limit(provider): + """Test search_tools respects limit.""" + results = await provider.search_tools("a", limit=1) + assert len(results) <= 1 + + +@pytest.mark.asyncio +async def test_get_tool_schema_returns_schema(provider): + """Test get_tool_schema returns full schema for known tool.""" + schema = await 
provider.get_tool_schema("calculator") + + assert "function" in schema + assert schema["function"]["name"] == "calculator" + assert "parameters" in schema["function"] + + +@pytest.mark.asyncio +async def test_get_tool_schema_caches_results(provider): + """Test get_tool_schema caches results.""" + # First call + await provider.get_tool_schema("calculator") + assert "calculator" in provider._tool_cache + + # Second call should use cache + schema = await provider.get_tool_schema("calculator") + assert schema["function"]["name"] == "calculator" + + +@pytest.mark.asyncio +async def test_get_tool_schema_unknown_tool(provider): + """Test get_tool_schema returns error for unknown tool.""" + schema = await provider.get_tool_schema("nonexistent_tool") + assert "error" in schema + + +@pytest.mark.asyncio +async def test_call_tool_executes_tool(provider): + """Test call_tool delegates to tool_manager.""" + # Must fetch schema first (enforced workflow) + await provider.get_tool_schema("calculator") + result = await provider.call_tool("calculator", {"expression": "2+2"}) + + assert result["success"] is True + provider.tool_manager.execute_tool.assert_called_once() + + +@pytest.mark.asyncio +async def test_execute_dynamic_tool_list_tools(provider): + """Test execute_dynamic_tool handles list_tools.""" + result = await provider.execute_dynamic_tool("list_tools", {"limit": 10}) + + assert "result" in result + assert "count" in result + assert "total_available" in result + + +@pytest.mark.asyncio +async def test_execute_dynamic_tool_search_tools(provider): + """Test execute_dynamic_tool handles search_tools.""" + result = await provider.execute_dynamic_tool( + "search_tools", {"query": "calc", "limit": 5} + ) + + assert "result" in result + assert "count" in result + + +@pytest.mark.asyncio +async def test_execute_dynamic_tool_get_tool_schema(provider): + """Test execute_dynamic_tool handles get_tool_schema.""" + result = await provider.execute_dynamic_tool( + "get_tool_schema", 
{"tool_name": "calculator"} + ) + + assert "function" in result + + +@pytest.mark.asyncio +async def test_execute_dynamic_tool_call_tool(provider): + """Test execute_dynamic_tool handles call_tool.""" + result = await provider.execute_dynamic_tool( + "call_tool", {"tool_name": "calculator", "expression": "2+2"} + ) + + assert "success" in result + + +@pytest.mark.asyncio +async def test_execute_dynamic_tool_unknown(provider): + """Test execute_dynamic_tool returns error for unknown tool.""" + result = await provider.execute_dynamic_tool("unknown_tool", {}) + + assert "error" in result + assert "Unknown dynamic tool" in result["error"] + + +# ---------------------------------------------------------------------------- +# Additional coverage tests +# ---------------------------------------------------------------------------- + + +@pytest.fixture +def tools_with_long_descriptions() -> list[ToolInfo]: + """Create tools with long descriptions for truncation tests.""" + return [ + ToolInfo( + name="verbose_tool", + namespace="test", + description="A" * 250, # More than 200 chars + parameters={"type": "object", "properties": {}}, + ), + ] + + +@pytest.fixture +def provider_with_long_desc(tools_with_long_descriptions) -> DynamicToolProvider: + """Create provider with long description tools.""" + tool_manager = DummyToolManager(tools_with_long_descriptions) + return DynamicToolProvider(tool_manager) + + +@pytest.mark.asyncio +async def test_list_tools_truncates_long_descriptions(provider_with_long_desc): + """Test list_tools truncates descriptions over 200 chars.""" + results = await provider_with_long_desc.list_tools(limit=50) + + assert len(results) == 1 + desc = results[0]["description"] + assert len(desc) == 200 # 197 + "..." 
+ assert desc.endswith("...") + + +@pytest.mark.asyncio +async def test_list_tools_exception_handling(): + """Test list_tools handles exceptions gracefully.""" + tool_manager = DummyToolManager([]) + # Make get_all_tools raise an exception + tool_manager.get_all_tools = AsyncMock(side_effect=RuntimeError("db error")) + provider = DynamicToolProvider(tool_manager) + + results = await provider.list_tools() + assert results == [] + + +@pytest.mark.asyncio +async def test_search_tools_truncates_long_descriptions(provider_with_long_desc): + """Test search_tools truncates descriptions over 200 chars.""" + results = await provider_with_long_desc.search_tools("verbose", limit=10) + + assert len(results) == 1 + desc = results[0]["description"] + assert len(desc) == 200 + assert desc.endswith("...") + + +@pytest.mark.asyncio +async def test_search_tools_exception_handling(): + """Test search_tools handles exceptions gracefully.""" + tool_manager = DummyToolManager([]) + tool_manager.get_all_tools = AsyncMock(side_effect=RuntimeError("db error")) + provider = DynamicToolProvider(tool_manager) + + results = await provider.search_tools("test") + assert results == [] + + +@pytest.mark.asyncio +async def test_get_tool_schema_exception_handling(): + """Test get_tool_schema handles exceptions gracefully.""" + tool_manager = DummyToolManager([]) + tool_manager.get_all_tools = AsyncMock(side_effect=RuntimeError("db error")) + provider = DynamicToolProvider(tool_manager) + + result = await provider.get_tool_schema("any_tool") + assert "error" in result + assert "db error" in result["error"] + + +@pytest.mark.asyncio +async def test_call_tool_unwraps_tool_result(): + """Test call_tool unwraps nested ToolResult objects.""" + + # Create a mock result with a nested .result attribute + class NestedResult: + def __init__(self): + self.result = {"actual": "data"} + + tools = [ + ToolInfo(name="test", namespace="test", description="Test tool", parameters={}), + ] + tool_manager = 
DummyToolManager(tools) + tool_manager.execute_tool = AsyncMock( + return_value=ToolCallResult( + tool_name="test", + success=True, + result=NestedResult(), + ) + ) + + provider = DynamicToolProvider(tool_manager) + # Must fetch schema first (enforced workflow) + await provider.get_tool_schema("test") + result = await provider.call_tool("test", {}) + + assert result["success"] is True + + +@pytest.mark.asyncio +async def test_call_tool_unwraps_content_dict(): + """Test call_tool extracts 'content' from result dicts.""" + tools = [ + ToolInfo(name="test", namespace="test", description="Test tool", parameters={}), + ] + tool_manager = DummyToolManager(tools) + tool_manager.execute_tool = AsyncMock( + return_value=ToolCallResult( + tool_name="test", + success=True, + result={"content": "extracted_value"}, + ) + ) + + provider = DynamicToolProvider(tool_manager) + # Must fetch schema first (enforced workflow) + await provider.get_tool_schema("test") + result = await provider.call_tool("test", {}) + + assert result["success"] is True + + +@pytest.mark.asyncio +async def test_call_tool_format_exception(): + """Test call_tool handles format_tool_response exception.""" + tools = [ + ToolInfo(name="test", namespace="test", description="Test tool", parameters={}), + ] + tool_manager = DummyToolManager(tools) + tool_manager.execute_tool = AsyncMock( + return_value=ToolCallResult( + tool_name="test", + success=True, + result={"data": "test"}, + ) + ) + # Make format_tool_response raise an exception + tool_manager.format_tool_response = lambda x: (_ for _ in ()).throw( + ValueError("format error") + ) + + provider = DynamicToolProvider(tool_manager) + # Must fetch schema first (enforced workflow) + await provider.get_tool_schema("test") + result = await provider.call_tool("test", {}) + + # Should fallback to str() representation + assert result["success"] is True + assert "result" in result + + +@pytest.mark.asyncio +async def test_call_tool_failure(): + """Test call_tool 
handles tool execution failure.""" + tools = [ + ToolInfo(name="test", namespace="test", description="Test tool", parameters={}), + ] + tool_manager = DummyToolManager(tools) + tool_manager.execute_tool = AsyncMock( + return_value=ToolCallResult( + tool_name="test", + success=False, + error="execution failed", + ) + ) + + provider = DynamicToolProvider(tool_manager) + # Must fetch schema first (enforced workflow) + await provider.get_tool_schema("test") + result = await provider.call_tool("test", {}) + + assert result["success"] is False + assert result["error"] == "execution failed" + + +@pytest.mark.asyncio +async def test_call_tool_exception(): + """Test call_tool handles exceptions.""" + tools = [ + ToolInfo(name="test", namespace="test", description="Test tool", parameters={}), + ] + tool_manager = DummyToolManager(tools) + tool_manager.execute_tool = AsyncMock(side_effect=RuntimeError("network error")) + + provider = DynamicToolProvider(tool_manager) + # Must fetch schema first (enforced workflow) + await provider.get_tool_schema("test") + result = await provider.call_tool("test", {}) + + assert result["success"] is False + assert "network error" in result["error"] + + +@pytest.mark.asyncio +async def test_call_tool_auto_fetches_schema(): + """Test call_tool auto-fetches schema when not already fetched.""" + tools = [ + ToolInfo(name="test", namespace="test", description="Test tool", parameters={}), + ] + tool_manager = DummyToolManager(tools) + + provider = DynamicToolProvider(tool_manager) + # Don't fetch schema explicitly - call directly + # The provider should auto-fetch schema before execution + result = await provider.call_tool("test", {}) + + # Should succeed because schema is auto-fetched + assert result["success"] is True + # Schema should now be marked as fetched + assert "test" in provider._schema_fetched + + +@pytest.mark.asyncio +async def test_get_tool_schema_with_no_description(): + """Test get_tool_schema handles tools with no description.""" 
+ tools = [ + ToolInfo( + name="no_desc_tool", + namespace="test", + description=None, + parameters={"type": "object", "properties": {}}, + ), + ] + tool_manager = DummyToolManager(tools) + provider = DynamicToolProvider(tool_manager) + + schema = await provider.get_tool_schema("no_desc_tool") + + assert schema["function"]["description"] == "No description provided" + + +@pytest.mark.asyncio +async def test_get_tool_schema_with_no_parameters(): + """Test get_tool_schema handles tools with no parameters.""" + tools = [ + ToolInfo( + name="no_params_tool", + namespace="test", + description="A tool without params", + parameters=None, + ), + ] + tool_manager = DummyToolManager(tools) + provider = DynamicToolProvider(tool_manager) + + schema = await provider.get_tool_schema("no_params_tool") + + assert schema["function"]["parameters"] == {"type": "object", "properties": {}} diff --git a/tests/tools/test_execution.py b/tests/tools/test_execution.py new file mode 100644 index 00000000..0a8513e8 --- /dev/null +++ b/tests/tools/test_execution.py @@ -0,0 +1,495 @@ +# tests/tools/test_execution.py +"""Tests for parallel and streaming tool execution.""" + +import asyncio +import pytest +from datetime import datetime + +from chuk_tool_processor import ToolCall as CTPToolCall +from chuk_tool_processor import ToolResult as CTPToolResult + +from mcp_cli.tools.execution import execute_tools_parallel, stream_execute_tools +from mcp_cli.tools.models import ToolCallResult + + +class MockToolManager: + """Mock ToolManager for testing execution functions.""" + + def __init__(self, results: dict[str, ToolCallResult] | None = None): + self.tool_timeout = 30.0 + self.results = results or {} + self.executed_tools: list[str] = [] + + async def execute_tool( + self, + tool_name: str, + arguments: dict, + namespace: str | None = None, + timeout: float | None = None, + ) -> ToolCallResult: + self.executed_tools.append(tool_name) + if tool_name in self.results: + return self.results[tool_name] + 
return ToolCallResult( + tool_name=tool_name, + success=True, + result={"output": f"result from {tool_name}"}, + ) + + +# ---------------------------------------------------------------------------- +# execute_tools_parallel tests +# ---------------------------------------------------------------------------- + + +@pytest.mark.asyncio +async def test_execute_tools_parallel_empty_calls(): + """Test with empty call list.""" + manager = MockToolManager() + results = await execute_tools_parallel(manager, []) + assert results == [] + + +@pytest.mark.asyncio +async def test_execute_tools_parallel_single_call(): + """Test with single tool call.""" + manager = MockToolManager() + calls = [CTPToolCall(id="call_1", tool="test_tool", arguments={"x": 1})] + + results = await execute_tools_parallel(manager, calls) + + assert len(results) == 1 + assert results[0].tool == "test_tool" + assert results[0].is_success + assert manager.executed_tools == ["test_tool"] + + +@pytest.mark.asyncio +async def test_execute_tools_parallel_multiple_calls(): + """Test with multiple tool calls.""" + manager = MockToolManager() + calls = [ + CTPToolCall(id="call_1", tool="tool_a", arguments={}), + CTPToolCall(id="call_2", tool="tool_b", arguments={}), + CTPToolCall(id="call_3", tool="tool_c", arguments={}), + ] + + results = await execute_tools_parallel(manager, calls) + + assert len(results) == 3 + assert set(r.tool for r in results) == {"tool_a", "tool_b", "tool_c"} + assert len(manager.executed_tools) == 3 + + +@pytest.mark.asyncio +async def test_execute_tools_parallel_with_timeout(): + """Test with custom timeout.""" + manager = MockToolManager() + calls = [CTPToolCall(id="call_1", tool="test_tool", arguments={})] + + results = await execute_tools_parallel(manager, calls, timeout=60.0) + + assert len(results) == 1 + assert results[0].is_success + + +@pytest.mark.asyncio +async def test_execute_tools_parallel_with_on_tool_start(): + """Test on_tool_start callback is invoked.""" + manager = 
MockToolManager() + started_tools: list[str] = [] + + async def on_start(call: CTPToolCall): + started_tools.append(call.tool) + + calls = [ + CTPToolCall(id="call_1", tool="tool_a", arguments={}), + CTPToolCall(id="call_2", tool="tool_b", arguments={}), + ] + + await execute_tools_parallel(manager, calls, on_tool_start=on_start) + + assert set(started_tools) == {"tool_a", "tool_b"} + + +@pytest.mark.asyncio +async def test_execute_tools_parallel_with_on_tool_result(): + """Test on_tool_result callback is invoked.""" + manager = MockToolManager() + completed_tools: list[str] = [] + + async def on_result(result: CTPToolResult): + completed_tools.append(result.tool) + + calls = [ + CTPToolCall(id="call_1", tool="tool_a", arguments={}), + CTPToolCall(id="call_2", tool="tool_b", arguments={}), + ] + + await execute_tools_parallel(manager, calls, on_tool_result=on_result) + + assert set(completed_tools) == {"tool_a", "tool_b"} + + +@pytest.mark.asyncio +async def test_execute_tools_parallel_callback_exception(): + """Test that callback exceptions don't break execution.""" + manager = MockToolManager() + + async def failing_callback(call: CTPToolCall): + raise ValueError("callback error") + + calls = [CTPToolCall(id="call_1", tool="test_tool", arguments={})] + + # Should not raise, just log warning + results = await execute_tools_parallel( + manager, calls, on_tool_start=failing_callback + ) + + assert len(results) == 1 + assert results[0].is_success + + +@pytest.mark.asyncio +async def test_execute_tools_parallel_with_error(): + """Test handling tool execution errors.""" + manager = MockToolManager( + results={ + "failing_tool": ToolCallResult( + tool_name="failing_tool", + success=False, + error="tool failed", + ) + } + ) + + calls = [ + CTPToolCall(id="call_1", tool="failing_tool", arguments={}), + CTPToolCall(id="call_2", tool="success_tool", arguments={}), + ] + + results = await execute_tools_parallel(manager, calls) + + assert len(results) == 2 + failing = next(r 
for r in results if r.tool == "failing_tool") + success = next(r for r in results if r.tool == "success_tool") + + assert not failing.is_success + assert failing.error == "tool failed" + assert success.is_success + + +@pytest.mark.asyncio +async def test_execute_tools_parallel_max_concurrency(): + """Test max_concurrency limits parallel execution.""" + execution_count = 0 + max_concurrent = 0 + + original_execute = MockToolManager.execute_tool + + async def tracking_execute( + self, tool_name, arguments, namespace=None, timeout=None + ): + nonlocal execution_count, max_concurrent + execution_count += 1 + current = execution_count + max_concurrent = max(max_concurrent, current) + await asyncio.sleep(0.01) # Simulate work + execution_count -= 1 + return await original_execute(self, tool_name, arguments, namespace, timeout) + + manager = MockToolManager() + manager.execute_tool = lambda *args, **kwargs: tracking_execute( + manager, *args, **kwargs + ) + + calls = [ + CTPToolCall(id=f"call_{i}", tool=f"tool_{i}", arguments={}) for i in range(10) + ] + + await execute_tools_parallel(manager, calls, max_concurrency=2) + + # Max concurrent should not exceed 2 (though timing may vary) + assert max_concurrent <= 3 # Allow some slack due to async timing + + +@pytest.mark.asyncio +async def test_execute_tools_parallel_respects_namespace(): + """Test namespace is passed correctly.""" + manager = MockToolManager() + calls = [ + CTPToolCall(id="call_1", tool="test_tool", arguments={}, namespace="custom_ns") + ] + + results = await execute_tools_parallel(manager, calls) + + assert len(results) == 1 + + +# ---------------------------------------------------------------------------- +# stream_execute_tools tests +# ---------------------------------------------------------------------------- + + +@pytest.mark.asyncio +async def test_stream_execute_tools_empty_calls(): + """Test with empty call list.""" + manager = MockToolManager() + results = [] + async for result in 
stream_execute_tools(manager, []): + results.append(result) + assert results == [] + + +@pytest.mark.asyncio +async def test_stream_execute_tools_single_call(): + """Test streaming with single tool call.""" + manager = MockToolManager() + calls = [CTPToolCall(id="call_1", tool="test_tool", arguments={"x": 1})] + + results = [] + async for result in stream_execute_tools(manager, calls): + results.append(result) + + assert len(results) == 1 + assert results[0].tool == "test_tool" + assert results[0].is_success + + +@pytest.mark.asyncio +async def test_stream_execute_tools_multiple_calls(): + """Test streaming with multiple tool calls.""" + manager = MockToolManager() + calls = [ + CTPToolCall(id="call_1", tool="tool_a", arguments={}), + CTPToolCall(id="call_2", tool="tool_b", arguments={}), + CTPToolCall(id="call_3", tool="tool_c", arguments={}), + ] + + results = [] + async for result in stream_execute_tools(manager, calls): + results.append(result) + + assert len(results) == 3 + assert set(r.tool for r in results) == {"tool_a", "tool_b", "tool_c"} + + +@pytest.mark.asyncio +async def test_stream_execute_tools_with_on_tool_start(): + """Test on_tool_start callback in streaming mode.""" + manager = MockToolManager() + started_tools: list[str] = [] + + async def on_start(call: CTPToolCall): + started_tools.append(call.tool) + + calls = [ + CTPToolCall(id="call_1", tool="tool_a", arguments={}), + CTPToolCall(id="call_2", tool="tool_b", arguments={}), + ] + + results = [] + async for result in stream_execute_tools(manager, calls, on_tool_start=on_start): + results.append(result) + + assert set(started_tools) == {"tool_a", "tool_b"} + assert len(results) == 2 + + +@pytest.mark.asyncio +async def test_stream_execute_tools_yields_as_completed(): + """Test results are yielded as tools complete.""" + manager = MockToolManager() + yield_times: list[float] = [] + + calls = [ + CTPToolCall(id="call_1", tool="tool_a", arguments={}), + CTPToolCall(id="call_2", tool="tool_b", 
arguments={}), + ] + + import time + + start = time.time() + async for result in stream_execute_tools(manager, calls): + yield_times.append(time.time() - start) + + # Both should yield quickly (not waiting for all) + assert len(yield_times) == 2 + + +@pytest.mark.asyncio +async def test_stream_execute_tools_with_error(): + """Test streaming handles errors gracefully.""" + manager = MockToolManager( + results={ + "failing_tool": ToolCallResult( + tool_name="failing_tool", + success=False, + error="tool failed", + ) + } + ) + + calls = [ + CTPToolCall(id="call_1", tool="failing_tool", arguments={}), + CTPToolCall(id="call_2", tool="success_tool", arguments={}), + ] + + results = [] + async for result in stream_execute_tools(manager, calls): + results.append(result) + + assert len(results) == 2 + + +@pytest.mark.asyncio +async def test_stream_execute_tools_result_fields(): + """Test CTPToolResult has expected fields.""" + manager = MockToolManager() + calls = [CTPToolCall(id="call_1", tool="test_tool", arguments={})] + + results = [] + async for result in stream_execute_tools(manager, calls): + results.append(result) + + result = results[0] + assert result.id == "call_1" + assert result.tool == "test_tool" + assert isinstance(result.start_time, datetime) + assert isinstance(result.end_time, datetime) + assert result.machine # Should have hostname + assert result.pid > 0 # Should have process ID + + +# ---------------------------------------------------------------------------- +# Additional coverage tests +# ---------------------------------------------------------------------------- + + +@pytest.mark.asyncio +async def test_execute_tools_parallel_on_tool_result_callback_exception(): + """Test on_tool_result callback exception doesn't break execution.""" + manager = MockToolManager() + + async def failing_result_callback(result: CTPToolResult): + raise ValueError("callback error") + + calls = [CTPToolCall(id="call_1", tool="test_tool", arguments={})] + + # Should not 
raise, just log warning + results = await execute_tools_parallel( + manager, calls, on_tool_result=failing_result_callback + ) + + assert len(results) == 1 + assert results[0].is_success + + +@pytest.mark.asyncio +async def test_stream_execute_tools_on_tool_start_callback_exception(): + """Test on_tool_start callback exception in streaming mode.""" + manager = MockToolManager() + + async def failing_start_callback(call: CTPToolCall): + raise ValueError("callback error") + + calls = [CTPToolCall(id="call_1", tool="test_tool", arguments={})] + + results = [] + async for result in stream_execute_tools( + manager, calls, on_tool_start=failing_start_callback + ): + results.append(result) + + assert len(results) == 1 + assert results[0].is_success + + +@pytest.mark.asyncio +async def test_stream_execute_tools_cancellation(): + """Test stream_execute_tools handles cancellation gracefully.""" + manager = MockToolManager() + + # Create a slow tool that we can cancel + async def slow_execute(self, tool_name, arguments, namespace=None, timeout=None): + await asyncio.sleep(10) # Long delay + return ToolCallResult(tool_name=tool_name, success=True, result={}) + + manager.execute_tool = lambda *args, **kwargs: slow_execute( + manager, *args, **kwargs + ) + + calls = [ + CTPToolCall(id="call_1", tool="slow_tool_1", arguments={}), + CTPToolCall(id="call_2", tool="slow_tool_2", arguments={}), + ] + + results = [] + + async def collect_with_cancel(): + async for result in stream_execute_tools(manager, calls): + results.append(result) + # Cancel after receiving anything (but we won't receive anything since they're slow) + + task = asyncio.create_task(collect_with_cancel()) + await asyncio.sleep(0.1) # Let tasks start + task.cancel() + + try: + await task + except asyncio.CancelledError: + pass + + # Results should be empty since we cancelled before any completed + assert len(results) == 0 + + +@pytest.mark.asyncio +async def test_stream_execute_tools_with_timeout(): + """Test 
stream_execute_tools with custom timeout.""" + manager = MockToolManager() + calls = [CTPToolCall(id="call_1", tool="test_tool", arguments={})] + + results = [] + async for result in stream_execute_tools(manager, calls, timeout=60.0): + results.append(result) + + assert len(results) == 1 + assert results[0].is_success + + +@pytest.mark.asyncio +async def test_stream_execute_tools_max_concurrency(): + """Test stream_execute_tools respects max_concurrency.""" + execution_count = 0 + max_concurrent = 0 + + original_execute = MockToolManager.execute_tool + + async def tracking_execute( + self, tool_name, arguments, namespace=None, timeout=None + ): + nonlocal execution_count, max_concurrent + execution_count += 1 + current = execution_count + max_concurrent = max(max_concurrent, current) + await asyncio.sleep(0.01) # Simulate work + execution_count -= 1 + return await original_execute(self, tool_name, arguments, namespace, timeout) + + manager = MockToolManager() + manager.execute_tool = lambda *args, **kwargs: tracking_execute( + manager, *args, **kwargs + ) + + calls = [ + CTPToolCall(id=f"call_{i}", tool=f"tool_{i}", arguments={}) for i in range(5) + ] + + results = [] + async for result in stream_execute_tools(manager, calls, max_concurrency=2): + results.append(result) + + assert len(results) == 5 + # Max concurrent should not exceed 2 (with some slack for async timing) + assert max_concurrent <= 3 diff --git a/tests/tools/test_filter.py b/tests/tools/test_filter.py index fc185e75..51ffd23e 100644 --- a/tests/tools/test_filter.py +++ b/tests/tools/test_filter.py @@ -18,7 +18,7 @@ def test_tool_filter_initialization(self): assert tf.disabled_by_user == set() assert tf.auto_fix_enabled is True assert tf._validation_cache == {} - assert tf._fix_stats == {"attempted": 0, "successful": 0, "failed": 0} + assert tf._fix_stats.to_dict() == {"attempted": 0, "successful": 0, "failed": 0} def test_is_tool_enabled(self): """Test is_tool_enabled method.""" @@ -30,8 +30,10 @@ 
def test_is_tool_enabled(self): def test_disable_tool_user_reason(self): """Test disable_tool with user reason.""" + from mcp_cli.tools.filter import DisabledReason + tf = ToolFilter() - tf.disable_tool("tool1", reason="user") + tf.disable_tool("tool1", reason=DisabledReason.USER) assert "tool1" in tf.disabled_tools assert "tool1" in tf.disabled_by_user @@ -39,8 +41,10 @@ def test_disable_tool_user_reason(self): def test_disable_tool_validation_reason(self): """Test disable_tool with validation reason.""" + from mcp_cli.tools.filter import DisabledReason + tf = ToolFilter() - tf.disable_tool("tool2", reason="validation") + tf.disable_tool("tool2", reason=DisabledReason.VALIDATION) assert "tool2" in tf.disabled_tools assert "tool2" in tf.disabled_by_validation @@ -48,9 +52,11 @@ def test_disable_tool_validation_reason(self): def test_enable_tool(self): """Test enable_tool method.""" + from mcp_cli.tools.filter import DisabledReason + tf = ToolFilter() - tf.disable_tool("tool1", reason="user") - tf.disable_tool("tool2", reason="validation") + tf.disable_tool("tool1", reason=DisabledReason.USER) + tf.disable_tool("tool2", reason=DisabledReason.VALIDATION) tf.enable_tool("tool1") assert "tool1" not in tf.disabled_tools @@ -62,9 +68,11 @@ def test_enable_tool(self): def test_get_disabled_tools(self): """Test get_disabled_tools method.""" + from mcp_cli.tools.filter import DisabledReason + tf = ToolFilter() - tf.disable_tool("tool1", reason="user") - tf.disable_tool("tool2", reason="validation") + tf.disable_tool("tool1", reason=DisabledReason.USER) + tf.disable_tool("tool2", reason=DisabledReason.VALIDATION) disabled = tf.get_disabled_tools() assert disabled["tool1"] == "user" @@ -72,10 +80,12 @@ def test_get_disabled_tools(self): def test_get_disabled_tools_by_reason(self): """Test get_disabled_tools_by_reason method.""" + from mcp_cli.tools.filter import DisabledReason + tf = ToolFilter() - tf.disable_tool("tool1", reason="user") - tf.disable_tool("tool2", 
reason="user") - tf.disable_tool("tool3", reason="validation") + tf.disable_tool("tool1", reason=DisabledReason.USER) + tf.disable_tool("tool2", reason=DisabledReason.USER) + tf.disable_tool("tool3", reason=DisabledReason.VALIDATION) user_disabled = tf.get_disabled_tools_by_reason("user") assert "tool1" in user_disabled @@ -91,9 +101,11 @@ def test_get_disabled_tools_by_reason(self): def test_clear_validation_disabled(self): """Test clear_validation_disabled method.""" + from mcp_cli.tools.filter import DisabledReason + tf = ToolFilter() - tf.disable_tool("tool1", reason="validation") - tf.disable_tool("tool2", reason="user") + tf.disable_tool("tool1", reason=DisabledReason.VALIDATION) + tf.disable_tool("tool2", reason=DisabledReason.USER) tf.clear_validation_disabled() @@ -101,7 +113,7 @@ def test_clear_validation_disabled(self): assert "tool1" not in tf.disabled_by_validation assert "tool2" in tf.disabled_tools # User disabled should remain assert tf._validation_cache == {} - assert tf._fix_stats == {"attempted": 0, "successful": 0, "failed": 0} + assert tf._fix_stats.to_dict() == {"attempted": 0, "successful": 0, "failed": 0} def test_filter_tools_with_valid_openai_tools(self): """Test filter_tools with valid OpenAI tools.""" @@ -164,7 +176,7 @@ def test_filter_tools_auto_fix_enabled(self): assert len(valid) == 1 assert "title" not in valid[0]["function"] - assert tf._fix_stats["successful"] > 0 + assert tf._fix_stats.successful > 0 def test_filter_tools_auto_fix_disabled(self): """Test filter_tools with auto-fix disabled.""" @@ -185,12 +197,14 @@ def test_filter_tools_auto_fix_disabled(self): valid, invalid = tf.filter_tools(tools, provider="openai") assert len(valid) == 1 - assert tf._fix_stats["attempted"] == 0 + assert tf._fix_stats.attempted == 0 def test_filter_tools_with_manually_disabled(self): """Test filter_tools with manually disabled tools.""" + from mcp_cli.tools.filter import DisabledReason + tf = ToolFilter() - tf.disable_tool("disabled_tool", 
reason="user") + tf.disable_tool("disabled_tool", reason=DisabledReason.USER) tools = [ { @@ -241,9 +255,11 @@ def test_extract_tool_name_unknown(self): def test_get_validation_summary(self): """Test get_validation_summary method.""" + from mcp_cli.tools.filter import DisabledReason + tf = ToolFilter() - tf.disable_tool("tool1", reason="user") - tf.disable_tool("tool2", reason="validation") + tf.disable_tool("tool1", reason=DisabledReason.USER) + tf.disable_tool("tool2", reason=DisabledReason.VALIDATION) summary = tf.get_validation_summary() @@ -266,11 +282,14 @@ def test_get_fix_statistics(self): def test_reset_statistics(self): """Test reset_statistics method.""" tf = ToolFilter() - tf._fix_stats = {"attempted": 10, "successful": 8, "failed": 2} + # Manually set values on the Pydantic model + tf._fix_stats.attempted = 10 + tf._fix_stats.successful = 8 + tf._fix_stats.failed = 2 tf.reset_statistics() - assert tf._fix_stats == {"attempted": 0, "successful": 0, "failed": 0} + assert tf._fix_stats.to_dict() == {"attempted": 0, "successful": 0, "failed": 0} def test_set_auto_fix_enabled(self): """Test set_auto_fix_enabled method.""" diff --git a/tests/tools/test_models.py b/tests/tools/test_models.py index c4aef11e..76283673 100644 --- a/tests/tools/test_models.py +++ b/tests/tools/test_models.py @@ -4,12 +4,27 @@ Target: 90%+ coverage """ +from datetime import datetime + import pytest + from mcp_cli.tools.models import ( - ToolInfo, + ConversationMessage, + ExperimentalCapabilities, + FunctionDefinition, + LLMToolDefinition, + ResourceInfo, + ServerCapabilities, ServerInfo, + ToolCallMessage, ToolCallResult, - ResourceInfo, + ToolDefinitionInput, + ToolInfo, + ToolInputSchema, + ToolType, + TransportServerConfig, + TransportType, + ValidationResult, ) @@ -341,3 +356,602 @@ def test_resourceinfo_direct_creation(self): assert ri.type == "file" assert ri.extra["size"] == 1024 assert ri.extra["mime"] == "text/plain" + + +# 
---------------------------------------------------------------------------- +# Additional coverage tests for 90%+ +# ---------------------------------------------------------------------------- + + +class TestTransportType: + """Test TransportType enum.""" + + def test_transport_type_values(self): + """Test all transport type values.""" + assert TransportType.STDIO.value == "stdio" + assert TransportType.HTTP.value == "http" + assert TransportType.SSE.value == "sse" + assert TransportType.UNKNOWN.value == "unknown" + + +class TestToolType: + """Test ToolType enum.""" + + def test_tool_type_values(self): + """Test tool type values.""" + assert ToolType.FUNCTION.value == "function" + + +class TestExperimentalCapabilities: + """Test ExperimentalCapabilities model.""" + + def test_experimental_capabilities_defaults(self): + """Test default values.""" + ec = ExperimentalCapabilities() + assert ec.sampling is False + assert ec.logging is False + assert ec.streaming is False + + def test_experimental_capabilities_with_values(self): + """Test with custom values.""" + ec = ExperimentalCapabilities(sampling=True, logging=True, streaming=True) + assert ec.sampling is True + assert ec.logging is True + assert ec.streaming is True + + +class TestServerCapabilities: + """Test ServerCapabilities model.""" + + def test_server_capabilities_defaults(self): + """Test default values.""" + sc = ServerCapabilities() + assert sc.tools is False + assert sc.prompts is False + assert sc.resources is False + assert isinstance(sc.experimental, ExperimentalCapabilities) + + def test_server_capabilities_from_dict(self): + """Test from_dict class method.""" + data = { + "tools": True, + "prompts": True, + "resources": True, + "experimental": {"sampling": True, "logging": False, "streaming": True}, + } + sc = ServerCapabilities.from_dict(data) + assert sc.tools is True + assert sc.prompts is True + assert sc.resources is True + assert sc.experimental.sampling is True + assert 
sc.experimental.streaming is True + + def test_server_capabilities_from_dict_no_experimental(self): + """Test from_dict without experimental.""" + data = {"tools": True} + sc = ServerCapabilities.from_dict(data) + assert sc.tools is True + assert isinstance(sc.experimental, ExperimentalCapabilities) + + def test_server_capabilities_to_dict(self): + """Test to_dict method.""" + sc = ServerCapabilities(tools=True, prompts=True) + result = sc.to_dict() + assert result["tools"] is True + assert result["prompts"] is True + assert "experimental" in result + + +class TestServerInfoCapabilities: + """Additional tests for ServerInfo capabilities methods.""" + + def test_serverinfo_get_capabilities_typed(self): + """Test get_capabilities_typed method.""" + si = ServerInfo( + id=1, + name="test", + status="ok", + tool_count=5, + namespace="ns", + capabilities={"tools": True, "prompts": True}, + ) + caps = si.get_capabilities_typed() + assert isinstance(caps, ServerCapabilities) + assert caps.tools is True + assert caps.prompts is True + + +class TestToolCallResultChuk: + """Test ToolCallResult chuk integration.""" + + def test_from_chuk_result(self): + """Test from_chuk_result class method.""" + + class MockChukResult: + tool = "test_tool" + result = {"data": "value"} + error = None + start_time = datetime(2024, 1, 1, 12, 0, 0) + end_time = datetime(2024, 1, 1, 12, 0, 5) + + chuk_result = MockChukResult() + tcr = ToolCallResult.from_chuk_result(chuk_result) + + assert tcr.tool_name == "test_tool" + assert tcr.success is True + assert tcr.result == {"data": "value"} + assert tcr.error is None + assert tcr.execution_time == 5.0 + assert tcr.chuk_result is chuk_result + + def test_from_chuk_result_with_error(self): + """Test from_chuk_result with error.""" + + class MockChukResult: + tool = "test_tool" + result = None + error = "Something failed" + start_time = None + end_time = None + + tcr = ToolCallResult.from_chuk_result(MockChukResult()) + + assert tcr.tool_name == 
"test_tool" + assert tcr.success is False + assert tcr.error == "Something failed" + assert tcr.execution_time is None + + def test_from_chuk_result_no_times(self): + """Test from_chuk_result without start/end times.""" + + class MockChukResult: + tool = "test_tool" + result = {"data": "value"} + error = None + start_time = datetime(2024, 1, 1, 12, 0, 0) + end_time = None # No end time + + tcr = ToolCallResult.from_chuk_result(MockChukResult()) + assert tcr.execution_time is None + + def test_is_cached_property(self): + """Test is_cached property.""" + # No chuk_result + tcr1 = ToolCallResult(tool_name="t", success=True) + assert tcr1.is_cached is False + + # With chuk_result without cached attr + class MockChukNoCached: + pass + + tcr2 = ToolCallResult( + tool_name="t", success=True, chuk_result=MockChukNoCached() + ) + assert tcr2.is_cached is False + + # With cached=True + class MockChukCached: + cached = True + + tcr3 = ToolCallResult(tool_name="t", success=True, chuk_result=MockChukCached()) + assert tcr3.is_cached is True + + def test_attempts_property(self): + """Test attempts property.""" + # No chuk_result + tcr1 = ToolCallResult(tool_name="t", success=True) + assert tcr1.attempts == 1 + + # With attempts + class MockChukAttempts: + attempts = 3 + + tcr2 = ToolCallResult( + tool_name="t", success=True, chuk_result=MockChukAttempts() + ) + assert tcr2.attempts == 3 + + def test_machine_property(self): + """Test machine property.""" + # No chuk_result + tcr1 = ToolCallResult(tool_name="t", success=True) + assert tcr1.machine is None + + # With machine + class MockChukMachine: + machine = "server1.local" + + tcr2 = ToolCallResult( + tool_name="t", success=True, chuk_result=MockChukMachine() + ) + assert tcr2.machine == "server1.local" + + # With None machine + class MockChukNullMachine: + machine = None + + tcr3 = ToolCallResult( + tool_name="t", success=True, chuk_result=MockChukNullMachine() + ) + assert tcr3.machine is None + + def 
test_extract_mcp_text_content(self): + """Test _extract_mcp_text_content method.""" + tcr = ToolCallResult(tool_name="t", success=True) + + # Non-dict result + assert tcr._extract_mcp_text_content("not a dict") is None + + # Dict without content key + assert tcr._extract_mcp_text_content({"data": "value"}) is None + + # MCP structure + class MockToolResultContent: + content = [{"type": "text", "text": "Hello"}] + + result = {"content": MockToolResultContent()} + assert tcr._extract_mcp_text_content(result) == "Hello" + + # Multiple text blocks + class MockToolResultMultiple: + content = [ + {"type": "text", "text": "Line 1"}, + {"type": "image", "data": "..."}, + {"type": "text", "text": "Line 2"}, + ] + + result2 = {"content": MockToolResultMultiple()} + assert tcr._extract_mcp_text_content(result2) == "Line 1\nLine 2" + + def test_display_result_with_mcp_content(self): + """Test display_result with MCP content structure.""" + + class MockToolResultContent: + content = [{"type": "text", "text": "MCP Output"}] + + tcr = ToolCallResult( + tool_name="t", success=True, result={"content": MockToolResultContent()} + ) + assert tcr.display_result == "MCP Output" + + def test_display_result_none_result(self): + """Test display_result with None result.""" + tcr = ToolCallResult(tool_name="t", success=True, result=None) + assert tcr.display_result == "" + + def test_display_result_non_serializable_dict(self): + """Test display_result with non-serializable dict.""" + + class NonSerializable: + pass + + tcr = ToolCallResult( + tool_name="t", success=True, result={"obj": NonSerializable()} + ) + # Should fall back to str() + assert "obj" in tcr.display_result + + def test_to_conversation_history_with_mcp_content(self): + """Test to_conversation_history with MCP content.""" + + class MockToolResultContent: + content = [{"type": "text", "text": "MCP History"}] + + tcr = ToolCallResult( + tool_name="t", success=True, result={"content": MockToolResultContent()} + ) + assert 
tcr.to_conversation_history() == "MCP History" + + def test_to_conversation_history_none_result(self): + """Test to_conversation_history with None result.""" + tcr = ToolCallResult(tool_name="t", success=True, result=None) + assert tcr.to_conversation_history() == "" + + def test_to_conversation_history_dict_result(self): + """Test to_conversation_history with dict result.""" + tcr = ToolCallResult(tool_name="t", success=True, result={"key": "value"}) + result = tcr.to_conversation_history() + assert '"key"' in result + assert '"value"' in result + + def test_to_conversation_history_non_serializable_dict(self): + """Test to_conversation_history with non-serializable dict.""" + + class NonSerializable: + pass + + tcr = ToolCallResult( + tool_name="t", success=True, result={"obj": NonSerializable()} + ) + # Should fall back to str() + result = tcr.to_conversation_history() + assert "obj" in result + + +class TestValidationResult: + """Test ValidationResult model.""" + + def test_validation_result_success(self): + """Test success factory method.""" + vr = ValidationResult.success() + assert vr.is_valid is True + assert vr.error_message is None + assert vr.warnings == [] + + def test_validation_result_failure(self): + """Test failure factory method.""" + vr = ValidationResult.failure("Something went wrong") + assert vr.is_valid is False + assert vr.error_message == "Something went wrong" + + def test_validation_result_from_tuple(self): + """Test from_tuple factory method.""" + vr1 = ValidationResult.from_tuple((True, None)) + assert vr1.is_valid is True + + vr2 = ValidationResult.from_tuple((False, "Error")) + assert vr2.is_valid is False + assert vr2.error_message == "Error" + + def test_validation_result_display_result(self): + """Test display_result property.""" + vr_success = ValidationResult.success() + assert vr_success.display_result == "Validation successful" + + vr_failure = ValidationResult.failure("Invalid schema") + assert "Invalid schema" in 
vr_failure.display_result + + vr_unknown = ValidationResult(is_valid=False) + assert "Unknown error" in vr_unknown.display_result + + def test_validation_result_has_error(self): + """Test has_error property.""" + vr_success = ValidationResult.success() + assert vr_success.has_error is False + + vr_failure = ValidationResult.failure("Error") + assert vr_failure.has_error is True + + +class TestTransportServerConfig: + """Test TransportServerConfig model.""" + + def test_transport_server_config_defaults(self): + """Test default values.""" + tsc = TransportServerConfig(name="server1", url="https://example.com") + assert tsc.name == "server1" + assert tsc.url == "https://example.com" + assert tsc.headers == {} + assert tsc.api_key is None + assert tsc.timeout is None + assert tsc.max_retries is None + + def test_transport_server_config_full(self): + """Test with all values.""" + tsc = TransportServerConfig( + name="server1", + url="https://example.com", + headers={"Authorization": "Bearer token"}, + api_key="api_key_123", + timeout=30.0, + max_retries=3, + ) + assert tsc.headers["Authorization"] == "Bearer token" + assert tsc.api_key == "api_key_123" + assert tsc.timeout == 30.0 + assert tsc.max_retries == 3 + + def test_transport_server_config_to_stream_manager(self): + """Test to_stream_manager_config method.""" + tsc = TransportServerConfig( + name="server1", url="https://example.com", timeout=30.0 + ) + config = tsc.to_stream_manager_config() + assert config["name"] == "server1" + assert config["url"] == "https://example.com" + assert config["timeout"] == 30.0 + # None values should be excluded + assert "api_key" not in config + + +class TestConversationMessage: + """Test ConversationMessage model.""" + + def test_conversation_message_user(self): + """Test user_message factory.""" + msg = ConversationMessage.user_message("Hello") + assert msg.role == "user" + assert msg.content == "Hello" + + def test_conversation_message_assistant(self): + """Test 
assistant_message factory.""" + msg = ConversationMessage.assistant_message("Response") + assert msg.role == "assistant" + assert msg.content == "Response" + assert msg.tool_calls is None + + def test_conversation_message_assistant_with_tools(self): + """Test assistant_message with tool calls.""" + tool_calls = [ + { + "id": "call_1", + "type": "function", + "function": {"name": "test", "arguments": "{}"}, + } + ] + msg = ConversationMessage.assistant_message(content=None, tool_calls=tool_calls) + assert msg.role == "assistant" + assert msg.tool_calls is not None + assert len(msg.tool_calls) == 1 + assert msg.tool_calls[0].id == "call_1" + + def test_conversation_message_system(self): + """Test system_message factory.""" + msg = ConversationMessage.system_message("System prompt") + assert msg.role == "system" + assert msg.content == "System prompt" + + def test_conversation_message_tool(self): + """Test tool_message factory.""" + msg = ConversationMessage.tool_message("Result", "call_1", name="my_tool") + assert msg.role == "tool" + assert msg.content == "Result" + assert msg.tool_call_id == "call_1" + assert msg.name == "my_tool" + + def test_conversation_message_to_dict(self): + """Test to_dict method.""" + msg = ConversationMessage.user_message("Test") + result = msg.to_dict() + assert result["role"] == "user" + assert result["content"] == "Test" + # None values excluded + assert "tool_calls" not in result + + def test_conversation_message_from_dict(self): + """Test from_dict factory.""" + data = {"role": "user", "content": "Hello"} + msg = ConversationMessage.from_dict(data) + assert msg.role == "user" + assert msg.content == "Hello" + + +class TestToolCallMessage: + """Test ToolCallMessage model.""" + + def test_tool_call_message(self): + """Test basic creation.""" + tcm = ToolCallMessage( + id="call_1", type="function", function={"name": "test", "arguments": "{}"} + ) + assert tcm.id == "call_1" + assert tcm.type == "function" + assert tcm.function["name"] == 
"test" + + +class TestFunctionDefinition: + """Test FunctionDefinition model.""" + + def test_function_definition_defaults(self): + """Test default values.""" + fd = FunctionDefinition(name="test", description="A test function") + assert fd.name == "test" + assert fd.description == "A test function" + assert fd.parameters == {"type": "object", "properties": {}} + + def test_function_definition_with_params(self): + """Test with parameters.""" + fd = FunctionDefinition( + name="add", + description="Add numbers", + parameters={ + "type": "object", + "properties": {"a": {"type": "number"}, "b": {"type": "number"}}, + "required": ["a", "b"], + }, + ) + assert fd.parameters["required"] == ["a", "b"] + + +class TestLLMToolDefinition: + """Test LLMToolDefinition model.""" + + def test_llm_tool_definition_defaults(self): + """Test default type.""" + ltd = LLMToolDefinition( + function=FunctionDefinition(name="test", description="Test") + ) + assert ltd.type == ToolType.FUNCTION + + def test_llm_tool_definition_to_dict(self): + """Test to_dict method.""" + ltd = LLMToolDefinition( + function=FunctionDefinition(name="test", description="Test") + ) + result = ltd.to_dict() + assert result["type"] == "function" + assert result["function"]["name"] == "test" + + +class TestToolInputSchema: + """Test ToolInputSchema model.""" + + def test_tool_input_schema_defaults(self): + """Test default values.""" + tis = ToolInputSchema() + assert tis.type == "object" + assert tis.properties == {} + assert tis.required == [] + assert tis.additionalProperties is False + + def test_tool_input_schema_with_values(self): + """Test with values.""" + tis = ToolInputSchema( + type="object", properties={"arg": {"type": "string"}}, required=["arg"] + ) + assert tis.properties["arg"]["type"] == "string" + assert tis.required == ["arg"] + + +class TestToolDefinitionInput: + """Test ToolDefinitionInput model.""" + + def test_tool_definition_input_defaults(self): + """Test default values.""" + tdi = 
ToolDefinitionInput(name="test") + assert tdi.name == "test" + assert tdi.namespace == "default" + assert tdi.description is None + assert tdi.inputSchema == {} + assert tdi.is_async is False + assert tdi.tags == [] + + def test_tool_definition_input_full(self): + """Test with all values.""" + tdi = ToolDefinitionInput( + name="test", + namespace="server", + description="Test tool", + inputSchema={"type": "object"}, + is_async=True, + tags=["tag1", "tag2"], + ) + assert tdi.namespace == "server" + assert tdi.description == "Test tool" + assert tdi.is_async is True + assert tdi.tags == ["tag1", "tag2"] + + +class TestToolInfoRequiredParametersEdgeCases: + """Additional edge case tests for ToolInfo.required_parameters.""" + + def test_required_parameters_non_list(self): + """Test required_parameters with non-list value.""" + ti = ToolInfo( + name="test", namespace="ns", parameters={"required": "not_a_list"} + ) + assert ti.required_parameters == [] + + +class TestToolCallResultOtherTypes: + """Test ToolCallResult with other result types.""" + + def test_display_result_with_list(self): + """Test display_result with list result.""" + tcr = ToolCallResult(tool_name="t", success=True, result=[1, 2, 3]) + assert tcr.display_result == "[1, 2, 3]" + + def test_display_result_with_int(self): + """Test display_result with int result.""" + tcr = ToolCallResult(tool_name="t", success=True, result=42) + assert tcr.display_result == "42" + + def test_to_conversation_history_with_list(self): + """Test to_conversation_history with list result.""" + tcr = ToolCallResult(tool_name="t", success=True, result=[1, 2, 3]) + assert tcr.to_conversation_history() == "[1, 2, 3]" + + def test_to_conversation_history_with_int(self): + """Test to_conversation_history with int result.""" + tcr = ToolCallResult(tool_name="t", success=True, result=42) + assert tcr.to_conversation_history() == "42" diff --git a/tests/tools/test_tool_manager.py b/tests/tools/test_tool_manager.py index 
80d1f4f8..2707fe79 100644 --- a/tests/tools/test_tool_manager.py +++ b/tests/tools/test_tool_manager.py @@ -1,12 +1,17 @@ # tests/mcp_cli/tool/test_tool_processor.py -import pytest import json +import os +import pytest from typing import Dict, List, Tuple +from unittest.mock import AsyncMock, MagicMock, patch -from mcp_cli.tools.manager import ToolManager -from mcp_cli.tools.models import ToolInfo +from chuk_tool_processor import ToolCall as CTPToolCall from chuk_tool_processor import ToolInfo as RegistryToolInfo +from mcp_cli.tools.filter import DisabledReason +from mcp_cli.tools.manager import ToolManager, get_tool_manager, set_tool_manager +from mcp_cli.tools.models import ToolInfo + class DummyMeta: """Simple object mimicking the real metadata objects returned by a registry.""" @@ -184,3 +189,1182 @@ async def test_get_adapted_tools_for_llm_other_provider(manager): assert f["type"] == "function" assert "description" in f["function"] assert "parameters" in f["function"] + + +# ---------------------------------------------------------------------------- +# Additional coverage tests for 90%+ coverage +# ---------------------------------------------------------------------------- + + +class TestToolManagerInitialization: + """Test ToolManager initialization.""" + + def test_init_with_defaults(self): + """Test ToolManager with default parameters.""" + tm = ToolManager(config_file="test.json", servers=["server1"]) + assert tm.config_file == "test.json" + assert tm.servers == ["server1"] + assert tm.server_names == {} + assert tm.max_concurrency == 4 + + def test_init_with_custom_timeout(self): + """Test ToolManager with custom tool timeout.""" + tm = ToolManager(config_file="test.json", servers=[], tool_timeout=60.0) + assert tm.tool_timeout == 60.0 + + def test_init_with_custom_init_timeout(self): + """Test ToolManager with custom initialization timeout.""" + tm = ToolManager( + config_file="test.json", servers=[], initialization_timeout=180.0 + ) + assert 
tm.initialization_timeout == 180.0 + + +class TestToolManagerClose: + """Test ToolManager close method.""" + + @pytest.mark.asyncio + async def test_close_with_stream_manager(self): + """Test close method with stream manager.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.close = AsyncMock() + tm.stream_manager = mock_sm + + await tm.close() + mock_sm.close.assert_called_once() + + @pytest.mark.asyncio + async def test_close_with_exception(self): + """Test close handles exceptions gracefully.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.close = AsyncMock(side_effect=RuntimeError("close error")) + tm.stream_manager = mock_sm + + # Should not raise + await tm.close() + + @pytest.mark.asyncio + async def test_close_without_stream_manager(self): + """Test close without stream manager.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.stream_manager = None + + # Should not raise + await tm.close() + + +class TestToolManagerToolExecution: + """Test tool execution methods.""" + + @pytest.mark.asyncio + async def test_execute_tool_dynamic(self): + """Test execute_tool with dynamic tool.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.dynamic_tool_provider.execute_dynamic_tool = AsyncMock( + return_value={"result": "data"} + ) + + result = await tm.execute_tool("list_tools", {}) + + assert result.success is True + assert result.result == {"result": "data"} + + @pytest.mark.asyncio + async def test_execute_tool_dynamic_exception(self): + """Test execute_tool with dynamic tool that raises exception.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.dynamic_tool_provider.execute_dynamic_tool = AsyncMock( + side_effect=RuntimeError("dynamic error") + ) + + result = await tm.execute_tool("list_tools", {}) + + assert result.success is False + assert "dynamic error" in result.error + + @pytest.mark.asyncio + async def 
test_execute_tool_not_initialized(self): + """Test execute_tool when not initialized.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.stream_manager = None + + result = await tm.execute_tool("regular_tool", {}) + + assert result.success is False + assert "not initialized" in result.error + + @pytest.mark.asyncio + async def test_execute_tool_success(self): + """Test successful tool execution.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.call_tool = AsyncMock(return_value={"output": "test"}) + tm.stream_manager = mock_sm + + result = await tm.execute_tool("my_tool", {"arg": "value"}) + + assert result.success is True + assert result.result == {"output": "test"} + + @pytest.mark.asyncio + async def test_execute_tool_transport_error(self): + """Test execute_tool with transport error. + + Note: Transport recovery is now handled by CTP middleware (retry, circuit breaker). + """ + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.call_tool = AsyncMock( + side_effect=RuntimeError("Transport not initialized") + ) + tm.stream_manager = mock_sm + + result = await tm.execute_tool("my_tool", {}) + + assert result.success is False + assert "Transport not initialized" in result.error + + @pytest.mark.asyncio + async def test_stream_execute_tool(self): + """Test stream_execute_tool method.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.call_tool = AsyncMock(return_value={"output": "test"}) + tm.stream_manager = mock_sm + + results = [] + async for result in tm.stream_execute_tool("my_tool", {}): + results.append(result) + + assert len(results) == 1 + assert results[0].success is True + + +class TestToolManagerMiddleware: + """Test middleware functionality. + + Note: Transport recovery is now handled by CTP middleware. + These tests verify middleware status and configuration. 
+ """ + + def test_middleware_enabled_default(self): + """Test middleware_enabled property without stream manager.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.stream_manager = None + + assert tm.middleware_enabled is False + + def test_middleware_enabled_with_stream_manager(self): + """Test middleware_enabled property with stream manager.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.middleware_enabled = True + tm.stream_manager = mock_sm + + assert tm.middleware_enabled is True + + def test_get_middleware_status_no_stream_manager(self): + """Test get_middleware_status without stream manager.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.stream_manager = None + + assert tm.get_middleware_status() is None + + def test_get_middleware_status_with_stream_manager(self): + """Test get_middleware_status with stream manager.""" + tm = ToolManager(config_file="test.json", servers=[]) + + # Create a mock status object with model_dump + mock_status = MagicMock() + mock_status.model_dump.return_value = { + "retry": {"enabled": True, "max_retries": 3}, + "circuit_breaker": {"enabled": True, "failure_threshold": 5}, + "rate_limiting": None, + } + + mock_sm = MagicMock() + mock_sm.get_middleware_status.return_value = mock_status + tm.stream_manager = mock_sm + + status = tm.get_middleware_status() + + assert status is not None + assert status["retry"]["enabled"] is True + assert status["circuit_breaker"]["enabled"] is True + + def test_get_middleware_status_returns_none(self): + """Test get_middleware_status when stream manager returns None.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.get_middleware_status.return_value = None + tm.stream_manager = mock_sm + + assert tm.get_middleware_status() is None + + def test_get_middleware_status_exception(self): + """Test get_middleware_status handles exceptions.""" + tm = ToolManager(config_file="test.json", 
servers=[]) + mock_sm = MagicMock() + mock_sm.get_middleware_status.side_effect = RuntimeError("error") + tm.stream_manager = mock_sm + + assert tm.get_middleware_status() is None + + +class TestToolManagerLLMTools: + """Test LLM tool methods.""" + + @pytest.mark.asyncio + async def test_get_tools_for_llm_dynamic_mode(self): + """Test get_tools_for_llm with dynamic tools mode.""" + tm = ToolManager(config_file="test.json", servers=[]) + + with patch.dict(os.environ, {"MCP_CLI_DYNAMIC_TOOLS": "1"}): + tools = await tm.get_tools_for_llm() + + # Should return 5 dynamic tools (list, search, get_schema, get_schemas, call) + assert len(tools) == 5 + names = {t["function"]["name"] for t in tools} + assert "list_tools" in names + assert "search_tools" in names + assert "get_tool_schemas" in names + + @pytest.mark.asyncio + async def test_get_tools_for_llm_include_filter(self, manager): + """Test get_tools_for_llm with include filter.""" + with patch.dict(os.environ, {"MCP_CLI_INCLUDE_TOOLS": "t1"}): + tools = await manager.get_tools_for_llm() + + names = {t["function"]["name"] for t in tools} + assert names == {"t1"} + + @pytest.mark.asyncio + async def test_get_tools_for_llm_exclude_filter(self, manager): + """Test get_tools_for_llm with exclude filter.""" + with patch.dict(os.environ, {"MCP_CLI_EXCLUDE_TOOLS": "t1"}): + tools = await manager.get_tools_for_llm() + + names = {t["function"]["name"] for t in tools} + assert "t1" not in names + + @pytest.mark.asyncio + async def test_get_tools_for_llm_exception(self): + """Test get_tools_for_llm handles exceptions.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.get_all_tools = AsyncMock(side_effect=RuntimeError("error")) + + tools = await tm.get_tools_for_llm() + + assert tools == [] + + @pytest.mark.asyncio + async def test_get_adapted_tools_with_mapping(self, manager): + """Test get_adapted_tools_for_llm with custom mapping.""" + custom_mapping = {"t1": "renamed_t1", "t2": "renamed_t2"} + tools, mapping = 
await manager.get_adapted_tools_for_llm( + name_mapping=custom_mapping + ) + + assert mapping == custom_mapping + + +class TestToolManagerFiltering: + """Test tool filtering methods.""" + + def test_disable_tool(self): + """Test disable_tool method.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.disable_tool("test_tool") + + assert not tm.is_tool_enabled("test_tool") + + def test_enable_tool(self): + """Test enable_tool method.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.disable_tool("test_tool") + tm.enable_tool("test_tool") + + assert tm.is_tool_enabled("test_tool") + + def test_get_disabled_tools(self): + """Test get_disabled_tools method.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.disable_tool("tool1", DisabledReason.USER) + + disabled = tm.get_disabled_tools() + assert "tool1" in disabled + + def test_set_auto_fix_enabled(self): + """Test set_auto_fix_enabled method.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.set_auto_fix_enabled(True) + + assert tm.is_auto_fix_enabled() is True + + def test_clear_validation_disabled_tools(self): + """Test clear_validation_disabled_tools method.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.disable_tool("tool1", DisabledReason.VALIDATION) + tm.clear_validation_disabled_tools() + + # Should be enabled after clearing + assert tm.is_tool_enabled("tool1") + + def test_get_validation_summary(self): + """Test get_validation_summary method.""" + tm = ToolManager(config_file="test.json", servers=[]) + summary = tm.get_validation_summary() + + assert isinstance(summary, dict) + + +class TestToolManagerValidation: + """Test tool validation methods.""" + + @pytest.mark.asyncio + async def test_validate_single_tool_not_found(self): + """Test validate_single_tool with non-existent tool.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.get_all_tools = AsyncMock(return_value=[]) + + valid, error = await 
tm.validate_single_tool("nonexistent") + + assert valid is False + assert "not found" in error + + @pytest.mark.asyncio + async def test_validate_single_tool_valid(self): + """Test validate_single_tool with valid tool.""" + tm = ToolManager(config_file="test.json", servers=[]) + + # Mock get_all_tools to return a valid tool with proper format + tool = ToolInfo( + name="t1", + namespace="ns1", + description="Test tool description", + parameters={ + "type": "object", + "properties": { + "param1": {"type": "string", "description": "A parameter"} + }, + "required": [], + }, + ) + tm.get_all_tools = AsyncMock(return_value=[tool]) + + # Disable auto-fix to test validation properly + tm.tool_filter.set_auto_fix_enabled(False) + + valid, error = await tm.validate_single_tool("t1") + + # The tool should pass basic validation + # (the filter may still reject it, which is fine for coverage) + assert isinstance(valid, bool) + assert error is None or isinstance(error, str) + + @pytest.mark.asyncio + async def test_validate_single_tool_exception(self): + """Test validate_single_tool handles exceptions.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.get_all_tools = AsyncMock(side_effect=RuntimeError("error")) + + valid, error = await tm.validate_single_tool("any") + + assert valid is False + assert error is not None + + @pytest.mark.asyncio + async def test_revalidate_tools(self, manager): + """Test revalidate_tools method.""" + result = await manager.revalidate_tools() + + assert "total" in result + assert "valid" in result + assert "invalid" in result + + @pytest.mark.asyncio + async def test_revalidate_tools_exception(self): + """Test revalidate_tools handles exceptions.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.get_all_tools = AsyncMock(side_effect=RuntimeError("error")) + + result = await tm.revalidate_tools() + + assert result["total"] == 0 + assert result["invalid_tools"] == [] + + def test_get_tool_validation_details(self): + """Test 
get_tool_validation_details method.""" + tm = ToolManager(config_file="test.json", servers=[]) + details = tm.get_tool_validation_details("any_tool") + + assert details["name"] == "any_tool" + assert details["status"] == "unknown" + + +class TestToolManagerServerInfo: + """Test server info methods.""" + + @pytest.mark.asyncio + async def test_get_server_info_no_stream_manager(self): + """Test get_server_info without stream manager.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.stream_manager = None + + result = await tm.get_server_info() + + assert result == [] + + @pytest.mark.asyncio + async def test_get_server_for_tool_no_stream_manager(self): + """Test get_server_for_tool without stream manager.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.stream_manager = None + + result = await tm.get_server_for_tool("any_tool") + + assert result is None + + @pytest.mark.asyncio + async def test_get_server_for_tool_found(self): + """Test get_server_for_tool with existing tool.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.stream_manager = MagicMock() + + tool = ToolInfo(name="t1", namespace="ns1") + tm.get_all_tools = AsyncMock(return_value=[tool]) + + result = await tm.get_server_for_tool("t1") + + assert result == "ns1" + + @pytest.mark.asyncio + async def test_get_server_for_tool_not_found(self, manager): + """Test get_server_for_tool with non-existent tool.""" + manager.stream_manager = MagicMock() + + result = await manager.get_server_for_tool("nonexistent") + + assert result is None + + @pytest.mark.asyncio + async def test_get_server_for_tool_exception(self): + """Test get_server_for_tool handles exceptions.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.stream_manager = MagicMock() + tm.get_all_tools = AsyncMock(side_effect=RuntimeError("error")) + + result = await tm.get_server_for_tool("any") + + assert result is None + + def test_get_streams_no_stream_manager(self): + """Test get_streams 
without stream manager.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.stream_manager = None + + result = tm.get_streams() + + assert result == [] + + def test_get_streams_with_method(self): + """Test get_streams with stream manager that has method.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.get_streams.return_value = ["stream1", "stream2"] + tm.stream_manager = mock_sm + + result = tm.get_streams() + + assert result == ["stream1", "stream2"] + + def test_get_streams_no_method(self): + """Test get_streams without method.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock(spec=[]) # No get_streams method + tm.stream_manager = mock_sm + + result = tm.get_streams() + + assert result == [] + + def test_get_streams_exception(self): + """Test get_streams handles exceptions.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.get_streams.side_effect = RuntimeError("error") + tm.stream_manager = mock_sm + + result = tm.get_streams() + + assert result == [] + + def test_list_resources_no_stream_manager(self): + """Test list_resources without stream manager.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.stream_manager = None + + result = tm.list_resources() + + assert result == [] + + def test_list_resources_with_method(self): + """Test list_resources with stream manager that has method.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.list_resources.return_value = ["resource1"] + tm.stream_manager = mock_sm + + result = tm.list_resources() + + assert result == ["resource1"] + + def test_list_resources_no_method(self): + """Test list_resources without method.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock(spec=[]) + tm.stream_manager = mock_sm + + result = tm.list_resources() + + assert result == [] + + def test_list_resources_exception(self): + 
"""Test list_resources handles exceptions.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.list_resources.side_effect = RuntimeError("error") + tm.stream_manager = mock_sm + + result = tm.list_resources() + + assert result == [] + + def test_list_prompts_no_stream_manager(self): + """Test list_prompts without stream manager.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.stream_manager = None + + result = tm.list_prompts() + + assert result == [] + + def test_list_prompts_with_method(self): + """Test list_prompts with stream manager that has method.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.list_prompts.return_value = ["prompt1"] + tm.stream_manager = mock_sm + + result = tm.list_prompts() + + assert result == ["prompt1"] + + def test_list_prompts_no_method(self): + """Test list_prompts without method.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock(spec=[]) + tm.stream_manager = mock_sm + + result = tm.list_prompts() + + assert result == [] + + def test_list_prompts_exception(self): + """Test list_prompts handles exceptions.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.list_prompts.side_effect = RuntimeError("error") + tm.stream_manager = mock_sm + + result = tm.list_prompts() + + assert result == [] + + +class TestGlobalToolManager: + """Test global tool manager functions.""" + + def test_get_tool_manager_none(self): + """Test get_tool_manager when not set.""" + # Reset global + import mcp_cli.tools.manager as manager_module + + manager_module._GLOBAL_TOOL_MANAGER = None + + result = get_tool_manager() + + assert result is None + + def test_set_and_get_tool_manager(self): + """Test set_tool_manager and get_tool_manager.""" + tm = ToolManager(config_file="test.json", servers=[]) + set_tool_manager(tm) + + result = get_tool_manager() + + assert result is tm + + # Cleanup + 
import mcp_cli.tools.manager as manager_module + + manager_module._GLOBAL_TOOL_MANAGER = None + + +class TestToolManagerGetAllToolsErrors: + """Test get_all_tools error handling.""" + + @pytest.mark.asyncio + async def test_get_all_tools_stream_manager_error(self): + """Test get_all_tools handles stream manager errors.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.get_all_tools.side_effect = RuntimeError("error") + tm.stream_manager = mock_sm + + result = await tm.get_all_tools() + + assert result == [] + + @pytest.mark.asyncio + async def test_get_all_tools_from_stream_manager(self): + """Test get_all_tools returns converted tools from stream_manager.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.get_all_tools.return_value = [ + { + "name": "tool1", + "namespace": "ns", + "description": "desc", + "inputSchema": {}, + } + ] + # tool_to_server_map maps tool name to server name (used for namespace) + mock_sm.tool_to_server_map = {"tool1": "test-server"} + tm.stream_manager = mock_sm + + result = await tm.get_all_tools() + + assert len(result) == 1 + assert result[0].name == "tool1" + assert result[0].namespace == "test-server" + + +class TestToolManagerInitializeAsync: + """Test async initialization methods.""" + + @pytest.mark.asyncio + async def test_initialize_no_config(self): + """Test initialize with no config.""" + tm = ToolManager(config_file="nonexistent.json", servers=[]) + + result = await tm.initialize() + + assert result is True # Empty toolset setup + + @pytest.mark.asyncio + async def test_setup_empty_toolset(self): + """Test _setup_empty_toolset.""" + tm = ToolManager(config_file="test.json", servers=[]) + + result = await tm._setup_empty_toolset() + + assert result is True + assert tm.stream_manager is None + assert tm._registry is None + assert tm.processor is None + + @pytest.mark.asyncio + async def test_initialize_stream_manager_no_servers(self): + 
"""Test _initialize_stream_manager with no servers.""" + tm = ToolManager(config_file="test.json", servers=[]) + + result = await tm._initialize_stream_manager("stdio") + + assert result is True + + @pytest.mark.asyncio + async def test_initialize_stream_manager_with_http_servers(self, tmp_path): + """Test _initialize_stream_manager with HTTP servers.""" + import json + + config = {"mcpServers": {"test": {"url": "https://example.com"}}} + config_file = tmp_path / "config.json" + config_file.write_text(json.dumps(config)) + + tm = ToolManager(config_file=str(config_file), servers=["test"]) + tm._config_loader.load() + tm._config_loader.detect_server_types(tm._config_loader._config_cache) + + # Mock StreamManager to avoid actual connection + with patch("mcp_cli.tools.manager.StreamManager") as MockSM: + mock_sm = AsyncMock() + MockSM.return_value = mock_sm + + result = await tm._initialize_stream_manager("stdio") + + # Should complete without error + assert result is True + + +class TestToolManagerGetServerInfo: + """Test get_server_info method.""" + + @pytest.mark.asyncio + async def test_get_server_info_with_servers(self, tmp_path): + """Test get_server_info returns correct info.""" + # Create a temp config file + import json + + config = { + "mcpServers": { + "http_server": {"url": "https://example.com"}, + "stdio_server": {"command": "python", "args": ["-m", "server"]}, + } + } + config_file = tmp_path / "config.json" + config_file.write_text(json.dumps(config)) + + tm = ToolManager( + config_file=str(config_file), servers=["http_server", "stdio_server"] + ) + mock_sm = MagicMock() + mock_sm.get_all_tools.return_value = [ + { + "name": "tool1", + "namespace": "http_server", + "description": "desc", + "inputSchema": {}, + } + ] + # tool_to_server_map maps tool name to server name + mock_sm.tool_to_server_map = {"tool1": "http_server"} + tm.stream_manager = mock_sm + + # Load config and detect server types + tm._config_loader.load() + 
tm._config_loader.detect_server_types(tm._config_loader._config_cache) + + result = await tm.get_server_info() + + assert len(result) >= 1 + # Check tool count for http_server + http_servers = [s for s in result if s.name == "http_server"] + if http_servers: + assert http_servers[0].tool_count == 1 + + @pytest.mark.asyncio + async def test_get_server_info_exception(self): + """Test get_server_info handles exceptions.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + tm.stream_manager = mock_sm + tm.get_all_tools = AsyncMock(side_effect=RuntimeError("error")) + + result = await tm.get_server_info() + + assert result == [] + + +class TestToolManagerRevalidateTools: + """Test revalidate_tools method.""" + + @pytest.mark.asyncio + async def test_revalidate_tools_success(self): + """Test successful revalidation.""" + tm = ToolManager(config_file="test.json", servers=[]) + tools = [ + ToolInfo(name="t1", namespace="ns", description="desc", parameters={}), + ToolInfo(name="t2", namespace="ns", description="desc", parameters={}), + ] + tm.get_all_tools = AsyncMock(return_value=tools) + + result = await tm.revalidate_tools() + + assert result["total"] == 2 + assert "valid" in result + assert "invalid" in result + + +class TestToolManagerValidateSingleToolInvalid: + """Test validate_single_tool with invalid tool.""" + + @pytest.mark.asyncio + async def test_validate_single_tool_invalid_tool(self): + """Test validate_single_tool returns error for invalid tool.""" + tm = ToolManager(config_file="test.json", servers=[]) + + # Tool with invalid schema + tool = ToolInfo( + name="bad_tool", + namespace="ns", + description=None, # Missing description + parameters={"invalid": "schema"}, # Invalid parameters + ) + tm.get_all_tools = AsyncMock(return_value=[tool]) + + valid, error = await tm.validate_single_tool("bad_tool") + + # The result depends on filter behavior, but we're testing coverage + assert isinstance(valid, bool) + + +class 
TestToolManagerParallelExecution: + """Test parallel tool execution methods.""" + + @pytest.mark.asyncio + async def test_execute_tools_parallel(self): + """Test execute_tools_parallel method.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.call_tool = AsyncMock(return_value={"output": "test"}) + tm.stream_manager = mock_sm + + calls = [ + CTPToolCall(id="call_1", tool="regular_tool", arguments={}), + ] + + results = await tm.execute_tools_parallel(calls) + + assert len(results) == 1 + + @pytest.mark.asyncio + async def test_stream_execute_tools(self): + """Test stream_execute_tools method.""" + tm = ToolManager(config_file="test.json", servers=[]) + mock_sm = MagicMock() + mock_sm.call_tool = AsyncMock(return_value={"output": "test"}) + tm.stream_manager = mock_sm + + calls = [ + CTPToolCall(id="call_1", tool="regular_tool", arguments={}), + ] + + results = [] + async for result in tm.stream_execute_tools(calls): + results.append(result) + + assert len(results) == 1 + + +class TestToolManagerGetToolByName: + """Test get_tool_by_name method.""" + + @pytest.mark.asyncio + async def test_get_tool_by_name_not_found(self): + """Test get_tool_by_name returns None when not found.""" + tm = ToolManager(config_file="test.json", servers=[]) + tm.get_all_tools = AsyncMock(return_value=[]) + + result = await tm.get_tool_by_name("nonexistent") + + assert result is None + + @pytest.mark.asyncio + async def test_get_tool_by_name_with_namespace_not_found(self): + """Test get_tool_by_name with namespace returns None when not found.""" + tm = ToolManager(config_file="test.json", servers=[]) + tool = ToolInfo(name="t1", namespace="ns1") + tm.get_all_tools = AsyncMock(return_value=[tool]) + + result = await tm.get_tool_by_name("t1", namespace="wrong_ns") + + assert result is None + + +class TestToolManagerFormatToolResponse: + """Additional format_tool_response tests.""" + + def test_format_tool_response_list_with_text_type(self): + 
"""Test format_tool_response list with text type items.""" + # Pass a list with text type items that fall through to the simple branch + response = [{"type": "text", "text": "hello"}] + result = ToolManager.format_tool_response(response) + + # Should extract text + assert "hello" in result + + def test_format_tool_response_list_non_text(self): + """Test format_tool_response with non-text list items.""" + response = [{"type": "image", "data": "base64..."}] + result = ToolManager.format_tool_response(response) + + # Should return JSON + assert "image" in result + + +class TestToolManagerInitializeSSE: + """Test initialization with SSE servers.""" + + @pytest.mark.asyncio + async def test_initialize_stream_manager_sse_servers(self, tmp_path): + """Test _initialize_stream_manager with SSE servers.""" + import json + + config = { + "mcpServers": { + "sse_server": {"url": "https://sse.example.com", "transport": "sse"} + } + } + config_file = tmp_path / "config.json" + config_file.write_text(json.dumps(config)) + + tm = ToolManager(config_file=str(config_file), servers=["sse_server"]) + tm._config_loader.load() + tm._config_loader.detect_server_types(tm._config_loader._config_cache) + + # Mock StreamManager + with patch("mcp_cli.tools.manager.StreamManager") as MockSM: + mock_sm = MagicMock() + mock_sm.initialize_with_sse = AsyncMock() + MockSM.return_value = mock_sm + + result = await tm._initialize_stream_manager("mcp-cli") + + assert result is True + mock_sm.initialize_with_sse.assert_called_once() + + +class TestToolManagerInitializeSTDIO: + """Test initialization with STDIO servers.""" + + @pytest.mark.asyncio + async def test_initialize_stream_manager_stdio_servers(self, tmp_path): + """Test _initialize_stream_manager with STDIO servers.""" + import json + + config = { + "mcpServers": { + "stdio_server": { + "command": "python", + "args": ["-m", "server"], + "env": {"DEBUG": "1"}, + } + } + } + config_file = tmp_path / "config.json" + 
config_file.write_text(json.dumps(config)) + + tm = ToolManager(config_file=str(config_file), servers=["stdio_server"]) + tm._config_loader.load() + tm._config_loader.detect_server_types(tm._config_loader._config_cache) + + # Mock StreamManager + with patch("mcp_cli.tools.manager.StreamManager") as MockSM: + mock_sm = MagicMock() + mock_sm.initialize_with_stdio = AsyncMock() + MockSM.return_value = mock_sm + + result = await tm._initialize_stream_manager("mcp-cli") + + assert result is True + mock_sm.initialize_with_stdio.assert_called_once() + + +class TestToolManagerInitializeMixed: + """Test initialization with mixed server types.""" + + @pytest.mark.asyncio + async def test_initialize_with_http_and_stdio(self, tmp_path): + """Test _initialize_stream_manager with HTTP and STDIO servers (parallel).""" + import json + + config = { + "mcpServers": { + "http_server": {"url": "https://example.com"}, + "stdio_server": {"command": "python", "args": []}, + } + } + config_file = tmp_path / "config.json" + config_file.write_text(json.dumps(config)) + + tm = ToolManager( + config_file=str(config_file), servers=["http_server", "stdio_server"] + ) + tm._config_loader.load() + tm._config_loader.detect_server_types(tm._config_loader._config_cache) + + # Mock StreamManager + with patch("mcp_cli.tools.manager.StreamManager") as MockSM: + mock_sm = MagicMock() + mock_sm.initialize_with_http_streamable = AsyncMock() + mock_sm.initialize_with_stdio = AsyncMock() + mock_sm.registry = MagicMock() + mock_sm.processor = MagicMock() + MockSM.return_value = mock_sm + + result = await tm._initialize_stream_manager("mcp-cli") + + assert result is True + # Both should be called (parallel init) + mock_sm.initialize_with_http_streamable.assert_called_once() + mock_sm.initialize_with_stdio.assert_called_once() + + +class TestToolManagerInitializeErrors: + """Test initialization error handling.""" + + @pytest.mark.asyncio + async def test_initialize_with_partial_failure(self, tmp_path): + 
"""Test _initialize_stream_manager continues when some transports fail.""" + import json + + config = { + "mcpServers": { + "http_server": {"url": "https://example.com"}, + } + } + config_file = tmp_path / "config.json" + config_file.write_text(json.dumps(config)) + + tm = ToolManager(config_file=str(config_file), servers=["http_server"]) + tm._config_loader.load() + tm._config_loader.detect_server_types(tm._config_loader._config_cache) + + # Mock StreamManager with failure + with patch("mcp_cli.tools.manager.StreamManager") as MockSM: + mock_sm = MagicMock() + mock_sm.initialize_with_http_streamable = AsyncMock( + side_effect=RuntimeError("Connection failed") + ) + MockSM.return_value = mock_sm + + result = await tm._initialize_stream_manager("mcp-cli") + + # Should still return True (partial success) + assert result is True + + @pytest.mark.asyncio + async def test_initialize_method_exception(self, tmp_path): + """Test initialize method handles exceptions.""" + import json + + config = {"mcpServers": {"test": {"url": "https://example.com"}}} + config_file = tmp_path / "config.json" + config_file.write_text(json.dumps(config)) + + tm = ToolManager(config_file=str(config_file), servers=["test"]) + + # Patch the config loader to raise an exception in load() + with patch.object( + tm._config_loader, "load", side_effect=RuntimeError("Config error") + ): + result = await tm.initialize() + + assert result is False + + +class TestToolManagerValidateSingleToolInvalidExtra: + """Additional tests for validate_single_tool with invalid tools.""" + + @pytest.mark.asyncio + async def test_validate_single_tool_returns_invalid_with_empty_description(self): + """Test validate_single_tool with invalid tool.""" + tm = ToolManager(config_file="test.json", servers=[]) + + # Tool with empty description + tool = ToolInfo( + name="bad_tool", + namespace="ns", + description="", # Empty description + parameters={}, + ) + tm.get_all_tools = AsyncMock(return_value=[tool]) + + valid, error = 
await tm.validate_single_tool("bad_tool") + + # Tool may be invalid or may have auto-fix applied + assert isinstance(valid, bool) + + @pytest.mark.asyncio + async def test_validate_single_tool_with_invalid_schema(self): + """Test validate_single_tool returns error for validation failure.""" + tm = ToolManager(config_file="test.json", servers=[]) + + # Tool with invalid schema + tool = ToolInfo( + name="bad_tool", + namespace="ns", + description="test", + parameters={"type": "invalid_type"}, # Invalid type + ) + tm.get_all_tools = AsyncMock(return_value=[tool]) + + # Disable auto-fix + tm.tool_filter.set_auto_fix_enabled(False) + + valid, error = await tm.validate_single_tool("bad_tool") + + # Should return invalid status (either False or validation passed depends on filter) + assert isinstance(valid, bool) + + +class TestToolManagerGetAllToolsFromRegistry: + """Test get_all_tools with direct registry access.""" + + @pytest.mark.asyncio + async def test_get_all_tools_registry_exception(self, manager): + """Test get_all_tools handles registry exceptions.""" + + # Force registry to raise error + async def failing_list_tools(): + raise RuntimeError("Registry error") + + manager._registry.list_tools = failing_list_tools + + result = await manager.get_all_tools() + + assert result == [] + + @pytest.mark.asyncio + async def test_get_all_tools_metadata_exception(self, manager): + """Test get_all_tools handles metadata exceptions gracefully.""" + + # Force metadata lookup to fail + async def failing_get_metadata(name, ns): + raise RuntimeError("Metadata error") + + manager._registry.get_metadata = failing_get_metadata + + result = await manager.get_all_tools() + + # Should still return tools, just with empty metadata + assert len(result) == 3 # ns1/t1, ns2/t2, default/t1 + + +class TestToolManagerGetServerInfoSSE: + """Test get_server_info with SSE servers.""" + + @pytest.mark.asyncio + async def test_get_server_info_sse_server(self, tmp_path): + """Test get_server_info 
includes SSE server info.""" + import json + + config = { + "mcpServers": { + "sse_server": {"url": "https://sse.example.com", "transport": "sse"} + } + } + config_file = tmp_path / "config.json" + config_file.write_text(json.dumps(config)) + + tm = ToolManager(config_file=str(config_file), servers=["sse_server"]) + mock_sm = MagicMock() + mock_sm.get_all_tools.return_value = [] + mock_sm.tool_to_server_map = {} + tm.stream_manager = mock_sm + + # Load config and detect server types + tm._config_loader.load() + tm._config_loader.detect_server_types(tm._config_loader._config_cache) + + result = await tm.get_server_info() + + assert len(result) == 1 + assert result[0].name == "sse_server" + # Transport should be SSE + from mcp_cli.tools.models import TransportType + + assert result[0].transport == TransportType.SSE diff --git a/tests/ui/__init__.py b/tests/ui/__init__.py deleted file mode 100644 index 07c027eb..00000000 --- a/tests/ui/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -""" -Tests for UI components. -""" diff --git a/tests/ui/test_chat_display_manager.py b/tests/ui/test_chat_display_manager.py deleted file mode 100644 index 219cd629..00000000 --- a/tests/ui/test_chat_display_manager.py +++ /dev/null @@ -1,608 +0,0 @@ -""" -Tests for the centralized chat display manager. - -Tests the ChatDisplayManager class that consolidates ALL UI display logic -for chat mode into a single coherent system. 
-""" - -import pytest -import time -import json -from unittest.mock import patch - -from mcp_cli.ui.chat_display_manager import ChatDisplayManager -from mcp_cli.chat.models import ToolExecutionState - - -class TestChatDisplayManager: - """Tests for ChatDisplayManager class.""" - - @pytest.fixture - def manager(self): - """Create a ChatDisplayManager instance.""" - # Console parameter is optional and not used with chuk-term - return ChatDisplayManager() - - def test_initialization(self, manager): - """Test proper initialization.""" - # Display state - assert not manager.is_streaming - assert manager.streaming_content == "" - assert manager.streaming_start_time == 0.0 - - assert not manager.is_tool_executing - assert manager.current_tool is None - assert manager.tool_start_time == 0.0 - - # Spinner - assert len(manager.spinner_frames) > 0 - assert manager.spinner_index == 0 - - # Live display tracking - assert not manager.live_display_active - assert manager.last_status_line == "" - - # ==================== STREAMING TESTS ==================== - - def test_start_streaming(self, manager): - """Test starting streaming response display.""" - manager.start_streaming() - - assert manager.is_streaming is True - assert manager.streaming_content == "" - assert isinstance(manager.streaming_start_time, float) - assert manager.live_display_active is True - - def test_update_streaming(self, manager): - """Test updating streaming content.""" - manager.is_streaming = True - manager.live_display_active = True - - with patch("builtins.print") as mock_print: - manager.update_streaming("Hello") - manager.update_streaming(" world") - - assert manager.streaming_content == "Hello world" - # Should have printed status updates - assert mock_print.call_count > 0 - - def test_update_streaming_when_not_streaming(self, manager): - """Test update streaming does nothing when not streaming.""" - manager.is_streaming = False - - with patch.object(manager, "_refresh_display") as mock_refresh: - 
manager.update_streaming("Hello") - - assert manager.streaming_content == "" - mock_refresh.assert_not_called() - - def test_finish_streaming(self, manager): - """Test finishing streaming response.""" - # Set up streaming state - manager.is_streaming = True - manager.streaming_content = "Final content" - manager.streaming_start_time = time.time() - 2.0 - manager.live_display_active = True - - with ( - patch("chuk_term.ui.terminal.clear_line"), - patch.object(manager, "_show_final_response") as mock_show, - ): - manager.finish_streaming() - - assert manager.is_streaming is False - assert not manager.live_display_active - - # Should show final response - mock_show.assert_called_once() - args = mock_show.call_args[0] - assert args[0] == "Final content" - assert 1.9 < args[1] < 2.1 # Elapsed time approximately 2 seconds - - def test_finish_streaming_when_not_streaming(self, manager): - """Test finish streaming when not streaming does nothing.""" - manager.is_streaming = False - - with patch.object(manager, "_stop_live_display") as mock_stop: - manager.finish_streaming() - mock_stop.assert_not_called() - - def test_finish_streaming_no_content(self, manager): - """Test finish streaming with no content.""" - manager.is_streaming = True - manager.streaming_content = "" - manager.live_display_active = True - - with patch("chuk_term.ui.output.print"): - manager.finish_streaming() - - assert not manager.is_streaming - # No content means no display - - # ==================== TOOL EXECUTION TESTS ==================== - - def test_start_tool_execution(self, manager): - """Test starting tool execution display.""" - tool_name = "test_tool" - arguments = {"param1": "value1", "param2": 42} - - manager.start_tool_execution(tool_name, arguments) - - assert manager.is_tool_executing is True - assert manager.current_tool is not None - assert manager.live_display_active is True - - current_tool = manager.current_tool - assert current_tool.name == tool_name - assert current_tool.arguments 
== arguments - assert isinstance(current_tool.start_time, float) - - def test_finish_tool_execution_success(self, manager): - """Test finishing tool execution successfully.""" - from chuk_term.ui import output - - # Set up tool execution state - manager.is_tool_executing = True - manager.current_tool = ToolExecutionState( - name="test_tool", - arguments={"param": "value"}, - start_time=time.time() - 1.5, - ) - manager.live_display_active = True - - result = "Tool completed successfully" - - with ( - patch.object(output, "success") as mock_success, - patch("mcp_cli.ui.chat_display_manager.clear_line"), - ): - manager.finish_tool_execution(result, success=True) - - assert manager.is_tool_executing is False - assert manager.current_tool is None - assert not manager.live_display_active - - # Should show success message - mock_success.assert_called_once() - assert "Completed" in mock_success.call_args[0][0] - - def test_finish_tool_execution_failure(self, manager): - """Test finishing tool execution with failure.""" - from chuk_term.ui import output - - manager.is_tool_executing = True - manager.current_tool = ToolExecutionState( - name="failing_tool", - arguments={}, - start_time=time.time() - 0.5, - ) - manager.live_display_active = True - - error_result = "Tool failed with error" - - with ( - patch.object(output, "error") as mock_error, - patch("mcp_cli.ui.chat_display_manager.clear_line"), - ): - manager.finish_tool_execution(error_result, success=False) - - assert manager.is_tool_executing is False - assert manager.current_tool is None - - # Should show error message - mock_error.assert_called_once() - assert "Failed" in mock_error.call_args[0][0] - - def test_finish_tool_execution_when_not_executing(self, manager): - """Test finish tool execution when not executing does nothing.""" - manager.is_tool_executing = False - manager.current_tool = None - - with patch.object(manager, "_stop_live_display") as mock_stop: - manager.finish_tool_execution("result") - 
mock_stop.assert_not_called() - - # ==================== MESSAGE DISPLAY TESTS ==================== - - def test_show_user_message(self, manager): - """Test showing user message.""" - from chuk_term.ui import output - - message = "Hello, how can I help?" - - with patch.object(output, "print") as mock_print: - manager.show_user_message(message) - - mock_print.assert_called_once() - assert "User:" in mock_print.call_args[0][0] - assert message in mock_print.call_args[0][0] - - def test_show_assistant_message(self, manager): - """Test showing assistant message.""" - from chuk_term.ui import output - - content = "Here's my response" - elapsed = 2.5 - - with patch.object(output, "print") as mock_print: - manager.show_assistant_message(content, elapsed) - - assert mock_print.call_count == 2 # Header and content - calls = [str(c) for c in mock_print.call_args_list] - assert any("Assistant" in c for c in calls) - assert any(content in c for c in calls) - - # ==================== LIVE DISPLAY MANAGEMENT TESTS ==================== - - def test_ensure_live_display(self, manager): - """Test ensuring live display is active.""" - assert not manager.live_display_active - - manager._ensure_live_display() - - assert manager.live_display_active - - def test_ensure_live_display_when_already_active(self, manager): - """Test ensure live display when already active.""" - manager.live_display_active = True - - manager._ensure_live_display() - - # Should still be active - assert manager.live_display_active - - def test_stop_live_display(self, manager): - """Test stopping live display.""" - - manager.live_display_active = True - manager.last_status_line = "test status" - - with patch("mcp_cli.ui.chat_display_manager.clear_line") as mock_clear: - manager._stop_live_display() - - assert not manager.live_display_active - assert manager.last_status_line == "" - mock_clear.assert_called_once() - - def test_stop_live_display_when_none(self, manager): - """Test stopping live display when none 
exists.""" - manager.live_display_active = False - - # Should not raise an error - manager._stop_live_display() - - def test_refresh_display(self, manager): - """Test refreshing live display.""" - manager.live_display_active = True - manager.last_status_line = "old status" - manager.is_streaming = True - manager.streaming_content = "test" - - with patch("builtins.print") as mock_print: - with patch("chuk_term.ui.terminal.clear_line"): - manager._refresh_display() - - # Should have printed new status - mock_print.assert_called() - - def test_refresh_display_when_not_active(self, manager): - """Test refresh display when not active.""" - manager.live_display_active = False - - with patch("builtins.print") as mock_print: - manager._refresh_display() - - # Should not print anything - mock_print.assert_not_called() - - # ==================== LIVE STATUS CREATION TESTS ==================== - - @patch("mcp_cli.ui.chat_display_manager.time.time") - def test_create_live_status_streaming(self, mock_time, manager): - """Test creating live status during streaming.""" - mock_time.return_value = 100.5 - - manager.is_streaming = True - manager.streaming_content = "Test streaming content" - manager.streaming_start_time = 100.0 - - status = manager._create_live_status() - - assert "Generating response" in status - assert "22 chars" in status # len("Test streaming content") - assert "0.5s" in status - - @patch("mcp_cli.ui.chat_display_manager.time.time") - def test_create_live_status_tool_executing(self, mock_time, manager): - """Test creating live status during tool execution.""" - mock_time.return_value = 200.5 - - manager.is_tool_executing = True - manager.current_tool = ToolExecutionState( - name="test_tool", - arguments={"param": "value"}, - start_time=200.0, - ) - - status = manager._create_live_status() - - assert "Executing test_tool" in status - assert "(0.5s)" in status - - def test_create_live_status_spinner_animation(self, manager): - """Test spinner animation in live 
status.""" - manager.is_streaming = True - manager.streaming_content = "test" - manager.streaming_start_time = time.time() - - initial_index = manager.spinner_index - - # Create status multiple times - manager._create_live_status() - manager._create_live_status() - - # Spinner should animate - assert manager.spinner_index != initial_index - assert manager.spinner_index < len(manager.spinner_frames) - - def test_create_live_status_neither_streaming_nor_executing(self, manager): - """Test create live status when neither streaming nor executing.""" - status = manager._create_live_status() - assert status == "" - - # ==================== FINAL DISPLAY TESTS ==================== - - def test_show_final_response(self, manager): - """Test showing final response.""" - from chuk_term.ui import output - - content = "Final response content" - elapsed = 2.5 - - with patch.object(output, "print") as mock_print: - manager._show_final_response(content, elapsed) - - assert mock_print.call_count == 2 # Header and content - calls = [str(c) for c in mock_print.call_args_list] - assert any("Assistant" in c for c in calls) - assert any(content in c for c in calls) - - def test_show_final_tool_result_success(self, manager): - """Test showing successful tool result.""" - from chuk_term.ui import output - - manager.current_tool = ToolExecutionState( - name="successful_tool", - arguments={"param": "value"}, - start_time=time.time(), - elapsed=1.5, - success=True, - result='{"status": "ok"}', - ) - - with ( - patch.object(output, "success") as mock_success, - patch.object(output, "code") as mock_code, - patch.object(output, "print"), - ): - manager._show_final_tool_result() - - mock_success.assert_called_once() - assert "Completed" in mock_success.call_args[0][0] - - # Should format JSON result - mock_code.assert_called_once() - - def test_show_final_tool_result_failure(self, manager): - """Test showing failed tool result.""" - from chuk_term.ui import output - - manager.current_tool = 
ToolExecutionState( - name="failed_tool", - arguments={"param": "value"}, - start_time=time.time(), - elapsed=0.8, - success=False, - result="Error occurred during execution", - ) - - with ( - patch.object(output, "error") as mock_error, - patch.object(output, "print"), - ): - manager._show_final_tool_result() - - mock_error.assert_called_once() - assert "Failed" in mock_error.call_args[0][0] - - def test_show_final_tool_result_no_current_tool(self, manager): - """Test showing tool result when no current tool.""" - from chuk_term.ui import output - - manager.current_tool = None - - with ( - patch.object(output, "success") as mock_success, - patch.object(output, "error") as mock_error, - ): - manager._show_final_tool_result() - - # Should not call any output methods - mock_success.assert_not_called() - mock_error.assert_not_called() - - def test_show_final_tool_result_json_formatting(self, manager): - """Test tool result with JSON formatting.""" - from chuk_term.ui import output - - json_result = '{"status": "success", "data": {"items": [1, 2, 3]}}' - - manager.current_tool = ToolExecutionState( - name="json_tool", - arguments={}, - start_time=time.time(), - elapsed=1.0, - success=True, - result=json_result, - ) - - with ( - patch.object(output, "success"), - patch.object(output, "code") as mock_code, - patch.object(output, "print"), - ): - manager._show_final_tool_result() - - # Should format JSON nicely - mock_code.assert_called_once() - formatted = mock_code.call_args[0][0] - # Check it's formatted JSON - parsed = json.loads(formatted) - assert parsed["status"] == "success" - - def test_show_final_tool_result_invalid_json(self, manager): - """Test tool result with invalid JSON falls back to text.""" - from chuk_term.ui import output - - invalid_json = "Not valid JSON content" - - manager.current_tool = ToolExecutionState( - name="text_tool", - arguments={}, - start_time=time.time(), - elapsed=1.0, - success=True, - result=invalid_json, - ) - - with ( - 
patch.object(output, "success"), - patch.object(output, "print") as mock_print, - ): - manager._show_final_tool_result() - - # Should print as plain text, not code - mock_print.assert_called() - # Check the result text was printed - calls = mock_print.call_args_list - assert any("Not valid JSON content" in str(c) for c in calls) - - def test_show_final_tool_result_filtered_arguments(self, manager): - """Test tool result with filtered arguments display.""" - from chuk_term.ui import output - - manager.current_tool = ToolExecutionState( - name="test_tool", - arguments={ - "valid_param": "value", - "empty_param": "", - "none_param": None, - "whitespace_param": " ", - "another_valid": "another_value", - }, - start_time=time.time(), - elapsed=1.0, - success=True, - result="result", - ) - - with ( - patch.object(output, "success"), - patch.object(output, "print") as mock_print, - ): - manager._show_final_tool_result() - - # Should only show valid_param and another_valid - print_calls = str(mock_print.call_args_list) - assert "valid_param" in print_calls - assert "another_valid" in print_calls - assert "empty_param" not in print_calls - assert "none_param" not in print_calls - - # ==================== INVOCATION AND RESULT TESTS ==================== - - def test_show_tool_invocation(self, manager): - """Test showing tool invocation.""" - from chuk_term.ui import output - - tool_name = "test_tool" - arguments = {"param": "value"} - - with patch.object(output, "tool_call") as mock_tool_call: - manager._show_tool_invocation(tool_name, arguments) - mock_tool_call.assert_called_once_with(tool_name, arguments) - - def test_show_tool_result_success(self, manager): - """Test showing successful tool result.""" - from chuk_term.ui import output - - tool_info = {"name": "test_tool"} - result = '{"status": "success"}' - elapsed = 1.5 - - with ( - patch.object(output, "success") as mock_success, - patch.object(output, "code") as mock_code, - patch.object(output, "print"), - ): - 
manager._show_tool_result(tool_info, result, elapsed, success=True) - - mock_success.assert_called_once() - assert "Completed" in mock_success.call_args[0][0] - mock_code.assert_called_once() # JSON formatted - - def test_show_tool_result_failure(self, manager): - """Test showing failed tool result.""" - from chuk_term.ui import output - - tool_info = {"name": "test_tool"} - result = "Error message" - elapsed = 0.5 - - with ( - patch.object(output, "error") as mock_error, - patch.object(output, "print"), - ): - manager._show_tool_result(tool_info, result, elapsed, success=False) - - mock_error.assert_called_once() - assert "Failed" in mock_error.call_args[0][0] - - # ==================== INTEGRATION TESTS ==================== - - def test_full_streaming_workflow(self, manager): - """Test complete streaming workflow.""" - with ( - patch("builtins.print"), - patch("chuk_term.ui.terminal.clear_line"), - patch("chuk_term.ui.output.print"), - ): - # Start streaming - manager.start_streaming() - assert manager.is_streaming - - # Update content - manager.update_streaming("Hello") - manager.update_streaming(" world") - assert manager.streaming_content == "Hello world" - - # Finish streaming - manager.finish_streaming() - assert not manager.is_streaming - - def test_full_tool_execution_workflow(self, manager): - """Test complete tool execution workflow.""" - with ( - patch("builtins.print"), - patch("chuk_term.ui.terminal.clear_line"), - patch("chuk_term.ui.output.success"), - ): - # Start tool execution - manager.start_tool_execution("test_tool", {"param": "value"}) - assert manager.is_tool_executing - assert manager.current_tool is not None - - # Finish execution - manager.finish_tool_execution("success result", success=True) - assert not manager.is_tool_executing - assert manager.current_tool is None diff --git a/tests/ui/test_streaming_display.py b/tests/ui/test_streaming_display.py deleted file mode 100644 index e5bd77ec..00000000 --- 
a/tests/ui/test_streaming_display.py +++ /dev/null @@ -1,515 +0,0 @@ -""" -Tests for the streaming display components. - -Tests the CompactStreamingDisplay class and StreamingContext context manager -for content-aware streaming display functionality. -""" - -import pytest -from unittest.mock import Mock, patch -from rich.panel import Panel - -from mcp_cli.ui.streaming_display import ( - CompactStreamingDisplay, - StreamingContext, - tokenize_text, -) - - -class TestTokenizeText: - """Tests for the tokenize_text function.""" - - def test_tokenize_simple_text(self): - """Test basic text tokenization.""" - text = "Hello world test" - tokens = list(tokenize_text(text)) - - assert len(tokens) > 0 - # All tokens should be strings - assert all(isinstance(token, str) for token in tokens) - # Joined tokens should equal original text - assert "".join(tokens) == text - - def test_tokenize_with_newlines(self): - """Test tokenization handles newlines correctly.""" - text = "Line 1\nLine 2\nLine 3" - tokens = list(tokenize_text(text)) - - assert "".join(tokens) == text - assert any("\n" in token for token in tokens) - - def test_tokenize_empty_text(self): - """Test tokenization of empty text.""" - tokens = list(tokenize_text("")) - assert tokens == [] - - def test_tokenize_single_word(self): - """Test tokenization of single word.""" - tokens = list(tokenize_text("hello")) - assert len(tokens) == 1 - assert tokens[0] == "hello" - - -class TestCompactStreamingDisplay: - """Tests for CompactStreamingDisplay class.""" - - @pytest.fixture - def display(self): - """Create a CompactStreamingDisplay instance.""" - return CompactStreamingDisplay() - - def test_initialization(self, display): - """Test proper initialization.""" - assert display.title == "🤖 Assistant" - assert display.mode == "response" - assert display.first_lines == [] - assert display.current_line == "" - assert display.total_chars == 0 - assert display.total_lines == 0 - assert not display.preview_captured - assert 
display.detected_type is None - assert display.content == "" - - def test_initialization_with_custom_params(self): - """Test initialization with custom parameters.""" - display = CompactStreamingDisplay(title="Custom", mode="tool") - assert display.title == "Custom" - assert display.mode == "tool" - - def test_detect_content_type_code(self, display): - """Test code detection.""" - display.detect_content_type("def function():") - assert display.detected_type == "code" - - display.detected_type = None - display.detect_content_type("function test() {") - assert display.detected_type == "code" - - display.detected_type = None - display.detect_content_type("class MyClass:") - assert display.detected_type == "code" - - display.detected_type = None - display.detect_content_type("import os") - assert display.detected_type == "code" - - def test_detect_content_type_markdown(self, display): - """Test markdown detection.""" - display.detect_content_type("## Heading") - assert display.detected_type == "markdown" - - display.detected_type = None - display.detect_content_type("### Subheading") - assert display.detected_type == "markdown" - - def test_detect_content_type_code_block(self, display): - """Test code block detection.""" - display.detect_content_type("```python\nprint('hello')\n```") - assert display.detected_type == "code" - - def test_detect_content_type_json(self, display): - """Test JSON detection.""" - display.detect_content_type('{"key": "value"}') - assert display.detected_type == "json" - - display.detected_type = None - display.detect_content_type('[{"item": 1}]') - assert display.detected_type == "json" - - def test_detect_content_type_query(self, display): - """Test SQL query detection.""" - display.detect_content_type("SELECT * FROM table") - assert display.detected_type == "query" - - display.detected_type = None - display.detect_content_type("CREATE TABLE test") - assert display.detected_type == "query" - - def test_detect_content_type_markup(self, 
display): - """Test HTML/XML markup detection.""" - display.detect_content_type("") - assert display.detected_type == "markup" - - display.detected_type = None - display.detect_content_type("
content
") - assert display.detected_type == "markup" - - display.detected_type = None - display.detect_content_type("") - assert display.detected_type == "markup" - - def test_detect_content_type_markdown_table(self, display): - """Test markdown table detection.""" - table_content = """ -| Column 1 | Column 2 | -|----------|----------| -| Data 1 | Data 2 | -""" - display.detect_content_type(table_content) - assert display.detected_type == "markdown_table" - - def test_detect_content_type_default_text(self, display): - """Test default text detection.""" - display.detect_content_type("Just some plain text") - assert display.detected_type == "text" - - def test_is_markdown_table_valid(self, display): - """Test valid markdown table detection.""" - table = """ -| Header 1 | Header 2 | -|----------|----------| -| Row 1 | Row 1 | -""" - assert display._is_markdown_table(table) is True - - def test_is_markdown_table_invalid(self, display): - """Test invalid markdown table detection.""" - # Not enough lines - assert display._is_markdown_table("| Header |") is False - - # No separator line - not_table = """ -| Header 1 | Header 2 | -| Row 1 | Row 1 | -""" - assert display._is_markdown_table(not_table) is False - - # No pipes - assert display._is_markdown_table("Regular text") is False - - def test_get_phase_message_response_mode(self, display): - """Test phase messages for response mode.""" - # Initial phase - phase = display.get_phase_message() - assert phase == "Starting" - - # Add some content to trigger progression - display.total_chars = 100 - phase = display.get_phase_message() - assert phase in ["Starting", "Generating response"] # Allow flexibility - - display.total_chars = 600 - phase = display.get_phase_message() - assert phase in ["Generating response", "Adding details", "Elaborating"] - - def test_get_phase_message_tool_mode(self): - """Test phase messages for tool mode.""" - display = CompactStreamingDisplay(mode="tool") - - phase = display.get_phase_message() - assert 
phase == "Preparing tool" - - display.total_chars = 200 - phase = display.get_phase_message() - assert phase == "Executing tool" - - def test_get_phase_message_thinking_mode(self): - """Test phase messages for thinking mode.""" - display = CompactStreamingDisplay(mode="thinking") - - phase = display.get_phase_message() - assert phase == "Thinking" - - display.total_chars = 200 - phase = display.get_phase_message() - assert phase == "Analyzing request" - - def test_get_phase_message_with_detected_code_type(self, display): - """Test phase messages adapt to detected content type.""" - display.detected_type = "code" - - phase = display.get_phase_message() - assert phase == "Starting" - - display.total_chars = 100 - phase = display.get_phase_message() - assert phase == "Writing code" - - def test_add_content_basic(self, display): - """Test basic content addition.""" - content = "Hello world" - display.add_content(content) - - assert display.content == content - assert display.total_chars == len(content) - - def test_add_content_with_newlines(self, display): - """Test content addition with line counting.""" - content = "Line 1\nLine 2\nLine 3" - display.add_content(content) - - assert display.content == content - assert display.total_chars == len(content) - assert display.total_lines >= 2 # Should count newlines - - def test_add_content_preview_capture(self, display): - """Test preview line capturing.""" - lines = [ - "Short line 1", - "Short line 2", - "Short line 3", - "Short line 4", - "Short line 5", - ] - content = "\n".join(lines) - - display.add_content(content) - - # Should capture first few lines - assert len(display.first_lines) <= display.max_preview_lines - assert display.first_lines[0] == lines[0] - - def test_add_content_long_line_truncation(self, display): - """Test long line truncation in preview.""" - long_line = "x" * 100 # Very long line - display.add_content(long_line) - - # Should truncate long lines - assert len(display.first_lines) > 0 - assert 
len(display.first_lines[0]) <= 70 - - def test_get_panel_basic(self, display): - """Test panel generation.""" - display.add_content("Some content") - panel = display.get_panel(elapsed=1.5) - - assert isinstance(panel, Panel) - assert display.title in str(panel.title) - - def test_get_panel_with_preview(self, display): - """Test panel with content preview.""" - display.add_content("Line 1\nLine 2\nLine 3") - panel = display.get_panel(elapsed=1.0) - - # Panel should be rendered - assert isinstance(panel, Panel) - # Should have fixed height for stability - assert panel.height == 10 - - def test_get_final_panel_markdown(self, display): - """Test final panel with markdown content.""" - display.content = "# Heading\n\nSome **bold** text" - panel = display.get_final_panel(elapsed=2.5) - - assert isinstance(panel, Panel) - assert "Response time: 2.50s" in str(panel.subtitle) - - def test_get_final_panel_markdown_table(self, display): - """Test final panel with markdown table.""" - display.content = """ -| Column 1 | Column 2 | -|----------|----------| -| Data 1 | Data 2 | -""" - panel = display.get_final_panel(elapsed=1.0) - - assert isinstance(panel, Panel) - # Should handle table content appropriately - - def test_get_final_panel_code_block(self, display): - """Test final panel with code blocks.""" - display.content = "```python\nprint('hello')\n```" - panel = display.get_final_panel(elapsed=1.0) - - assert isinstance(panel, Panel) - - def test_get_final_panel_fallback_to_text(self, display): - """Test fallback to text when markdown fails.""" - # Create content that might cause markdown to fail - display.content = "Some content with < > special chars" - - with patch( - "mcp_cli.ui.streaming_display.Markdown", side_effect=Exception("Test error") - ): - panel = display.get_final_panel(elapsed=1.0) - assert isinstance(panel, Panel) - - -class TestStreamingContext: - """Tests for StreamingContext context manager.""" - - @pytest.fixture - def console(self): - """Create a 
properly configured mock console.""" - from threading import RLock - - console = Mock() - # Add required Rich Console attributes and methods - console.is_jupyter = False - console.is_terminal = True - console.is_dumb_terminal = False - console.set_live = Mock() - console.clear_live = Mock() - console.show_cursor = Mock() - console.push_render_hook = Mock() - console.pop_render_hook = Mock() - console.set_alt_screen = Mock(return_value=False) - console.print = Mock() - console._lock = RLock() - console._live = None - console.options = Mock() - console.options.max_width = 80 - # Make console subscriptable for Rich internals - console.__getitem__ = Mock(return_value=None) - # Make it support context manager protocol - console.__enter__ = Mock(return_value=console) - console.__exit__ = Mock(return_value=None) - return console - - @patch("mcp_cli.ui.streaming_display.Live") - def test_context_manager_basic(self, mock_live_class, console): - """Test basic context manager functionality.""" - mock_live = Mock() - mock_live.__enter__ = Mock(return_value=mock_live) - mock_live.__exit__ = Mock(return_value=None) - mock_live_class.return_value = mock_live - - with StreamingContext(console) as ctx: - assert ctx is not None - assert ctx.display is not None - assert ctx.live is not None - - @patch("mcp_cli.ui.streaming_display.Live") - def test_context_manager_with_content(self, mock_live_class, console): - """Test context manager with content updates.""" - mock_live = Mock() - mock_live.__enter__ = Mock(return_value=mock_live) - mock_live.__exit__ = Mock(return_value=None) - mock_live.update = Mock() - mock_live_class.return_value = mock_live - - with StreamingContext(console, title="Test") as ctx: - ctx.update("Hello") - ctx.update(" world") - - assert ctx.content == "Hello world" - - @patch("mcp_cli.ui.streaming_display.Live") - def test_context_manager_custom_params(self, mock_live_class, console): - """Test context manager with custom parameters.""" - mock_live = Mock() - 
mock_live.__enter__ = Mock(return_value=mock_live) - mock_live.__exit__ = Mock(return_value=None) - mock_live_class.return_value = mock_live - - with StreamingContext( - console, - title="Custom Title", - mode="tool", - refresh_per_second=4, - transient=False, - ) as ctx: - assert ctx.display.title == "Custom Title" - assert ctx.display.mode == "tool" - assert ctx.refresh_per_second == 4 - assert ctx.transient is False - - @patch("mcp_cli.ui.streaming_display.Live") - def test_context_manager_live_display_lifecycle(self, mock_live_class, console): - """Test Live display lifecycle management.""" - mock_live = Mock() - mock_live.__enter__ = Mock(return_value=mock_live) - mock_live.__exit__ = Mock(return_value=None) - mock_live.update = Mock() - mock_live_class.return_value = mock_live - - with StreamingContext(console) as ctx: - ctx.update("test content") - - # Verify Live was created and used properly - mock_live_class.assert_called_once() - mock_live.__enter__.assert_called_once() - mock_live.__exit__.assert_called_once() - mock_live.update.assert_called() - - @patch("mcp_cli.ui.streaming_display.Live") - def test_content_property(self, mock_live_class, console): - """Test content property access.""" - mock_live = Mock() - mock_live.__enter__ = Mock(return_value=mock_live) - mock_live.__exit__ = Mock(return_value=None) - mock_live.update = Mock() - mock_live_class.return_value = mock_live - - with StreamingContext(console) as ctx: - assert ctx.content == "" - - ctx.update("test") - assert ctx.content == "test" - - @patch("mcp_cli.ui.streaming_display.Live") - @patch("mcp_cli.ui.streaming_display.time.time") - def test_elapsed_time_tracking(self, mock_time, mock_live_class, console): - """Test elapsed time is tracked correctly.""" - mock_live = Mock() - mock_live.__enter__ = Mock(return_value=mock_live) - mock_live.__exit__ = Mock(return_value=None) - mock_live.update = Mock() - mock_live_class.return_value = mock_live - - # Mock time progression - 
mock_time.side_effect = [100.0, 100.5, 101.0] # start, update, end - - with StreamingContext(console) as ctx: - # Time should progress as we make updates - ctx.update("content") - - # Verify time was called appropriately - assert mock_time.call_count >= 2 - - def test_update_method(self, console): - """Test update method functionality.""" - with patch("mcp_cli.ui.streaming_display.Live") as mock_live_class: - mock_live = Mock() - mock_live.__enter__ = Mock(return_value=mock_live) - mock_live.__exit__ = Mock(return_value=None) - mock_live.update = Mock() - mock_live_class.return_value = mock_live - - with StreamingContext(console) as ctx: - ctx.update("first") - ctx.update(" second") - - # Should have updated Live display - assert mock_live.update.call_count >= 2 - assert ctx.content == "first second" - - @patch("mcp_cli.ui.streaming_display.Live") - def test_final_panel_display(self, mock_live_class, console): - """Test final panel is displayed after context exit.""" - mock_live = Mock() - mock_live.__enter__ = Mock(return_value=mock_live) - mock_live.__exit__ = Mock(return_value=None) - mock_live.update = Mock() - mock_live_class.return_value = mock_live - - with StreamingContext(console) as ctx: - ctx.update("Final content") - - # Console should have been called to print final panel - console.print.assert_called() - - # The printed object should be a Panel - args, kwargs = console.print.call_args - assert len(args) > 0 - # Check if it's a Panel (the final panel) - printed_obj = args[0] - assert hasattr(printed_obj, "title") or isinstance(printed_obj, Panel) - - @patch("mcp_cli.ui.streaming_display.Live") - def test_no_final_panel_without_content(self, mock_live_class, console): - """Test no final panel is shown if no content was added.""" - mock_live = Mock() - mock_live.__enter__ = Mock(return_value=mock_live) - mock_live.__exit__ = Mock(return_value=None) - mock_live_class.return_value = mock_live - - with StreamingContext(console): - pass # No content added - 
- # Should not print anything if no content - # (or should print minimal content) - if console.print.called: - args, kwargs = console.print.call_args - # If something was printed, it should be minimal or empty - pass # Allow some flexibility in implementation diff --git a/tests/utils/test_preferences.py b/tests/utils/test_preferences.py index 2dff59d5..39484b59 100644 --- a/tests/utils/test_preferences.py +++ b/tests/utils/test_preferences.py @@ -539,14 +539,17 @@ def test_tool_patterns(self): # Verify patterns added patterns = manager.preferences.ui.tool_confirmation.patterns assert len(patterns) == 2 - assert {"pattern": "write_*", "action": "always"} in patterns - assert {"pattern": "read_*", "action": "never"} in patterns + # Patterns are now ToolPatternRule Pydantic models + assert any( + p.pattern == "write_*" and p.action == "always" for p in patterns + ) + assert any(p.pattern == "read_*" and p.action == "never" for p in patterns) # Remove pattern assert manager.remove_tool_pattern("write_*") is True patterns = manager.preferences.ui.tool_confirmation.patterns assert len(patterns) == 1 - assert {"pattern": "read_*", "action": "never"} in patterns + assert any(p.pattern == "read_*" and p.action == "never" for p in patterns) # Remove non-existent pattern assert manager.remove_tool_pattern("nonexistent_*") is False diff --git a/uv.lock b/uv.lock index 4e765d72..108eb5c9 100644 --- a/uv.lock +++ b/uv.lock @@ -21,7 +21,7 @@ wheels = [ [[package]] name = "aiohttp" -version = "3.13.2" +version = "3.13.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohappyeyeballs" }, @@ -32,93 +32,93 @@ dependencies = [ { name = "propcache" }, { name = "yarl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1c/ce/3b83ebba6b3207a7135e5fcaba49706f8a4b6008153b4e30540c982fae26/aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca", size = 7837994, upload-time = "2025-10-28T20:59:39.937Z" 
} -wheels = [ - { url = "https://files.pythonhosted.org/packages/35/74/b321e7d7ca762638cdf8cdeceb39755d9c745aff7a64c8789be96ddf6e96/aiohttp-3.13.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4647d02df098f6434bafd7f32ad14942f05a9caa06c7016fdcc816f343997dd0", size = 743409, upload-time = "2025-10-28T20:56:00.354Z" }, - { url = "https://files.pythonhosted.org/packages/99/3d/91524b905ec473beaf35158d17f82ef5a38033e5809fe8742e3657cdbb97/aiohttp-3.13.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e3403f24bcb9c3b29113611c3c16a2a447c3953ecf86b79775e7be06f7ae7ccb", size = 497006, upload-time = "2025-10-28T20:56:01.85Z" }, - { url = "https://files.pythonhosted.org/packages/eb/d3/7f68bc02a67716fe80f063e19adbd80a642e30682ce74071269e17d2dba1/aiohttp-3.13.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:43dff14e35aba17e3d6d5ba628858fb8cb51e30f44724a2d2f0c75be492c55e9", size = 493195, upload-time = "2025-10-28T20:56:03.314Z" }, - { url = "https://files.pythonhosted.org/packages/98/31/913f774a4708775433b7375c4f867d58ba58ead833af96c8af3621a0d243/aiohttp-3.13.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e2a9ea08e8c58bb17655630198833109227dea914cd20be660f52215f6de5613", size = 1747759, upload-time = "2025-10-28T20:56:04.904Z" }, - { url = "https://files.pythonhosted.org/packages/e8/63/04efe156f4326f31c7c4a97144f82132c3bb21859b7bb84748d452ccc17c/aiohttp-3.13.2-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53b07472f235eb80e826ad038c9d106c2f653584753f3ddab907c83f49eedead", size = 1704456, upload-time = "2025-10-28T20:56:06.986Z" }, - { url = "https://files.pythonhosted.org/packages/8e/02/4e16154d8e0a9cf4ae76f692941fd52543bbb148f02f098ca73cab9b1c1b/aiohttp-3.13.2-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e736c93e9c274fce6419af4aac199984d866e55f8a4cec9114671d0ea9688780", size = 1807572, upload-time = 
"2025-10-28T20:56:08.558Z" }, - { url = "https://files.pythonhosted.org/packages/34/58/b0583defb38689e7f06798f0285b1ffb3a6fb371f38363ce5fd772112724/aiohttp-3.13.2-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ff5e771f5dcbc81c64898c597a434f7682f2259e0cd666932a913d53d1341d1a", size = 1895954, upload-time = "2025-10-28T20:56:10.545Z" }, - { url = "https://files.pythonhosted.org/packages/6b/f3/083907ee3437425b4e376aa58b2c915eb1a33703ec0dc30040f7ae3368c6/aiohttp-3.13.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3b6fb0c207cc661fa0bf8c66d8d9b657331ccc814f4719468af61034b478592", size = 1747092, upload-time = "2025-10-28T20:56:12.118Z" }, - { url = "https://files.pythonhosted.org/packages/ac/61/98a47319b4e425cc134e05e5f3fc512bf9a04bf65aafd9fdcda5d57ec693/aiohttp-3.13.2-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:97a0895a8e840ab3520e2288db7cace3a1981300d48babeb50e7425609e2e0ab", size = 1606815, upload-time = "2025-10-28T20:56:14.191Z" }, - { url = "https://files.pythonhosted.org/packages/97/4b/e78b854d82f66bb974189135d31fce265dee0f5344f64dd0d345158a5973/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9e8f8afb552297aca127c90cb840e9a1d4bfd6a10d7d8f2d9176e1acc69bad30", size = 1723789, upload-time = "2025-10-28T20:56:16.101Z" }, - { url = "https://files.pythonhosted.org/packages/ed/fc/9d2ccc794fc9b9acd1379d625c3a8c64a45508b5091c546dea273a41929e/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:ed2f9c7216e53c3df02264f25d824b079cc5914f9e2deba94155190ef648ee40", size = 1718104, upload-time = "2025-10-28T20:56:17.655Z" }, - { url = "https://files.pythonhosted.org/packages/66/65/34564b8765ea5c7d79d23c9113135d1dd3609173da13084830f1507d56cf/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:99c5280a329d5fa18ef30fd10c793a190d996567667908bef8a7f81f8202b948", size = 1785584, upload-time = 
"2025-10-28T20:56:19.238Z" }, - { url = "https://files.pythonhosted.org/packages/30/be/f6a7a426e02fc82781afd62016417b3948e2207426d90a0e478790d1c8a4/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2ca6ffef405fc9c09a746cb5d019c1672cd7f402542e379afc66b370833170cf", size = 1595126, upload-time = "2025-10-28T20:56:20.836Z" }, - { url = "https://files.pythonhosted.org/packages/e5/c7/8e22d5d28f94f67d2af496f14a83b3c155d915d1fe53d94b66d425ec5b42/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:47f438b1a28e926c37632bff3c44df7d27c9b57aaf4e34b1def3c07111fdb782", size = 1800665, upload-time = "2025-10-28T20:56:22.922Z" }, - { url = "https://files.pythonhosted.org/packages/d1/11/91133c8b68b1da9fc16555706aa7276fdf781ae2bb0876c838dd86b8116e/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9acda8604a57bb60544e4646a4615c1866ee6c04a8edef9b8ee6fd1d8fa2ddc8", size = 1739532, upload-time = "2025-10-28T20:56:25.924Z" }, - { url = "https://files.pythonhosted.org/packages/17/6b/3747644d26a998774b21a616016620293ddefa4d63af6286f389aedac844/aiohttp-3.13.2-cp311-cp311-win32.whl", hash = "sha256:868e195e39b24aaa930b063c08bb0c17924899c16c672a28a65afded9c46c6ec", size = 431876, upload-time = "2025-10-28T20:56:27.524Z" }, - { url = "https://files.pythonhosted.org/packages/c3/63/688462108c1a00eb9f05765331c107f95ae86f6b197b865d29e930b7e462/aiohttp-3.13.2-cp311-cp311-win_amd64.whl", hash = "sha256:7fd19df530c292542636c2a9a85854fab93474396a52f1695e799186bbd7f24c", size = 456205, upload-time = "2025-10-28T20:56:29.062Z" }, - { url = "https://files.pythonhosted.org/packages/29/9b/01f00e9856d0a73260e86dd8ed0c2234a466c5c1712ce1c281548df39777/aiohttp-3.13.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b1e56bab2e12b2b9ed300218c351ee2a3d8c8fdab5b1ec6193e11a817767e47b", size = 737623, upload-time = "2025-10-28T20:56:30.797Z" }, - { url = 
"https://files.pythonhosted.org/packages/5a/1b/4be39c445e2b2bd0aab4ba736deb649fabf14f6757f405f0c9685019b9e9/aiohttp-3.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:364e25edaabd3d37b1db1f0cbcee8c73c9a3727bfa262b83e5e4cf3489a2a9dc", size = 492664, upload-time = "2025-10-28T20:56:32.708Z" }, - { url = "https://files.pythonhosted.org/packages/28/66/d35dcfea8050e131cdd731dff36434390479b4045a8d0b9d7111b0a968f1/aiohttp-3.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c5c94825f744694c4b8db20b71dba9a257cd2ba8e010a803042123f3a25d50d7", size = 491808, upload-time = "2025-10-28T20:56:34.57Z" }, - { url = "https://files.pythonhosted.org/packages/00/29/8e4609b93e10a853b65f8291e64985de66d4f5848c5637cddc70e98f01f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba2715d842ffa787be87cbfce150d5e88c87a98e0b62e0f5aa489169a393dbbb", size = 1738863, upload-time = "2025-10-28T20:56:36.377Z" }, - { url = "https://files.pythonhosted.org/packages/9d/fa/4ebdf4adcc0def75ced1a0d2d227577cd7b1b85beb7edad85fcc87693c75/aiohttp-3.13.2-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:585542825c4bc662221fb257889e011a5aa00f1ae4d75d1d246a5225289183e3", size = 1700586, upload-time = "2025-10-28T20:56:38.034Z" }, - { url = "https://files.pythonhosted.org/packages/da/04/73f5f02ff348a3558763ff6abe99c223381b0bace05cd4530a0258e52597/aiohttp-3.13.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:39d02cb6025fe1aabca329c5632f48c9532a3dabccd859e7e2f110668972331f", size = 1768625, upload-time = "2025-10-28T20:56:39.75Z" }, - { url = "https://files.pythonhosted.org/packages/f8/49/a825b79ffec124317265ca7d2344a86bcffeb960743487cb11988ffb3494/aiohttp-3.13.2-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e67446b19e014d37342f7195f592a2a948141d15a312fe0e700c2fd2f03124f6", size = 
1867281, upload-time = "2025-10-28T20:56:41.471Z" }, - { url = "https://files.pythonhosted.org/packages/b9/48/adf56e05f81eac31edcfae45c90928f4ad50ef2e3ea72cb8376162a368f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4356474ad6333e41ccefd39eae869ba15a6c5299c9c01dfdcfdd5c107be4363e", size = 1752431, upload-time = "2025-10-28T20:56:43.162Z" }, - { url = "https://files.pythonhosted.org/packages/30/ab/593855356eead019a74e862f21523db09c27f12fd24af72dbc3555b9bfd9/aiohttp-3.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eeacf451c99b4525f700f078becff32c32ec327b10dcf31306a8a52d78166de7", size = 1562846, upload-time = "2025-10-28T20:56:44.85Z" }, - { url = "https://files.pythonhosted.org/packages/39/0f/9f3d32271aa8dc35036e9668e31870a9d3b9542dd6b3e2c8a30931cb27ae/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8a9b889aeabd7a4e9af0b7f4ab5ad94d42e7ff679aaec6d0db21e3b639ad58d", size = 1699606, upload-time = "2025-10-28T20:56:46.519Z" }, - { url = "https://files.pythonhosted.org/packages/2c/3c/52d2658c5699b6ef7692a3f7128b2d2d4d9775f2a68093f74bca06cf01e1/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fa89cb11bc71a63b69568d5b8a25c3ca25b6d54c15f907ca1c130d72f320b76b", size = 1720663, upload-time = "2025-10-28T20:56:48.528Z" }, - { url = "https://files.pythonhosted.org/packages/9b/d4/8f8f3ff1fb7fb9e3f04fcad4e89d8a1cd8fc7d05de67e3de5b15b33008ff/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8aa7c807df234f693fed0ecd507192fc97692e61fee5702cdc11155d2e5cadc8", size = 1737939, upload-time = "2025-10-28T20:56:50.77Z" }, - { url = "https://files.pythonhosted.org/packages/03/d3/ddd348f8a27a634daae39a1b8e291ff19c77867af438af844bf8b7e3231b/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:9eb3e33fdbe43f88c3c75fa608c25e7c47bbd80f48d012763cb67c47f39a7e16", size = 1555132, upload-time = 
"2025-10-28T20:56:52.568Z" }, - { url = "https://files.pythonhosted.org/packages/39/b8/46790692dc46218406f94374903ba47552f2f9f90dad554eed61bfb7b64c/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9434bc0d80076138ea986833156c5a48c9c7a8abb0c96039ddbb4afc93184169", size = 1764802, upload-time = "2025-10-28T20:56:54.292Z" }, - { url = "https://files.pythonhosted.org/packages/ba/e4/19ce547b58ab2a385e5f0b8aa3db38674785085abcf79b6e0edd1632b12f/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ff15c147b2ad66da1f2cbb0622313f2242d8e6e8f9b79b5206c84523a4473248", size = 1719512, upload-time = "2025-10-28T20:56:56.428Z" }, - { url = "https://files.pythonhosted.org/packages/70/30/6355a737fed29dcb6dfdd48682d5790cb5eab050f7b4e01f49b121d3acad/aiohttp-3.13.2-cp312-cp312-win32.whl", hash = "sha256:27e569eb9d9e95dbd55c0fc3ec3a9335defbf1d8bc1d20171a49f3c4c607b93e", size = 426690, upload-time = "2025-10-28T20:56:58.736Z" }, - { url = "https://files.pythonhosted.org/packages/0a/0d/b10ac09069973d112de6ef980c1f6bb31cb7dcd0bc363acbdad58f927873/aiohttp-3.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:8709a0f05d59a71f33fd05c17fc11fcb8c30140506e13c2f5e8ee1b8964e1b45", size = 453465, upload-time = "2025-10-28T20:57:00.795Z" }, - { url = "https://files.pythonhosted.org/packages/bf/78/7e90ca79e5aa39f9694dcfd74f4720782d3c6828113bb1f3197f7e7c4a56/aiohttp-3.13.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7519bdc7dfc1940d201651b52bf5e03f5503bda45ad6eacf64dda98be5b2b6be", size = 732139, upload-time = "2025-10-28T20:57:02.455Z" }, - { url = "https://files.pythonhosted.org/packages/db/ed/1f59215ab6853fbaa5c8495fa6cbc39edfc93553426152b75d82a5f32b76/aiohttp-3.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:088912a78b4d4f547a1f19c099d5a506df17eacec3c6f4375e2831ec1d995742", size = 490082, upload-time = "2025-10-28T20:57:04.784Z" }, - { url = 
"https://files.pythonhosted.org/packages/68/7b/fe0fe0f5e05e13629d893c760465173a15ad0039c0a5b0d0040995c8075e/aiohttp-3.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5276807b9de9092af38ed23ce120539ab0ac955547b38563a9ba4f5b07b95293", size = 489035, upload-time = "2025-10-28T20:57:06.894Z" }, - { url = "https://files.pythonhosted.org/packages/d2/04/db5279e38471b7ac801d7d36a57d1230feeee130bbe2a74f72731b23c2b1/aiohttp-3.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1237c1375eaef0db4dcd7c2559f42e8af7b87ea7d295b118c60c36a6e61cb811", size = 1720387, upload-time = "2025-10-28T20:57:08.685Z" }, - { url = "https://files.pythonhosted.org/packages/31/07/8ea4326bd7dae2bd59828f69d7fdc6e04523caa55e4a70f4a8725a7e4ed2/aiohttp-3.13.2-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:96581619c57419c3d7d78703d5b78c1e5e5fc0172d60f555bdebaced82ded19a", size = 1688314, upload-time = "2025-10-28T20:57:10.693Z" }, - { url = "https://files.pythonhosted.org/packages/48/ab/3d98007b5b87ffd519d065225438cc3b668b2f245572a8cb53da5dd2b1bc/aiohttp-3.13.2-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a2713a95b47374169409d18103366de1050fe0ea73db358fc7a7acb2880422d4", size = 1756317, upload-time = "2025-10-28T20:57:12.563Z" }, - { url = "https://files.pythonhosted.org/packages/97/3d/801ca172b3d857fafb7b50c7c03f91b72b867a13abca982ed6b3081774ef/aiohttp-3.13.2-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:228a1cd556b3caca590e9511a89444925da87d35219a49ab5da0c36d2d943a6a", size = 1858539, upload-time = "2025-10-28T20:57:14.623Z" }, - { url = "https://files.pythonhosted.org/packages/f7/0d/4764669bdf47bd472899b3d3db91fffbe925c8e3038ec591a2fd2ad6a14d/aiohttp-3.13.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:ac6cde5fba8d7d8c6ac963dbb0256a9854e9fafff52fbcc58fdf819357892c3e", size = 1739597, upload-time = "2025-10-28T20:57:16.399Z" }, - { url = "https://files.pythonhosted.org/packages/c4/52/7bd3c6693da58ba16e657eb904a5b6decfc48ecd06e9ac098591653b1566/aiohttp-3.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f2bef8237544f4e42878c61cef4e2839fee6346dc60f5739f876a9c50be7fcdb", size = 1555006, upload-time = "2025-10-28T20:57:18.288Z" }, - { url = "https://files.pythonhosted.org/packages/48/30/9586667acec5993b6f41d2ebcf96e97a1255a85f62f3c653110a5de4d346/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:16f15a4eac3bc2d76c45f7ebdd48a65d41b242eb6c31c2245463b40b34584ded", size = 1683220, upload-time = "2025-10-28T20:57:20.241Z" }, - { url = "https://files.pythonhosted.org/packages/71/01/3afe4c96854cfd7b30d78333852e8e851dceaec1c40fd00fec90c6402dd2/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:bb7fb776645af5cc58ab804c58d7eba545a97e047254a52ce89c157b5af6cd0b", size = 1712570, upload-time = "2025-10-28T20:57:22.253Z" }, - { url = "https://files.pythonhosted.org/packages/11/2c/22799d8e720f4697a9e66fd9c02479e40a49de3de2f0bbe7f9f78a987808/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e1b4951125ec10c70802f2cb09736c895861cd39fd9dcb35107b4dc8ae6220b8", size = 1733407, upload-time = "2025-10-28T20:57:24.37Z" }, - { url = "https://files.pythonhosted.org/packages/34/cb/90f15dd029f07cebbd91f8238a8b363978b530cd128488085b5703683594/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:550bf765101ae721ee1d37d8095f47b1f220650f85fe1af37a90ce75bab89d04", size = 1550093, upload-time = "2025-10-28T20:57:26.257Z" }, - { url = "https://files.pythonhosted.org/packages/69/46/12dce9be9d3303ecbf4d30ad45a7683dc63d90733c2d9fe512be6716cd40/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fe91b87fc295973096251e2d25a811388e7d8adf3bd2b97ef6ae78bc4ac6c476", size = 
1758084, upload-time = "2025-10-28T20:57:28.349Z" }, - { url = "https://files.pythonhosted.org/packages/f9/c8/0932b558da0c302ffd639fc6362a313b98fdf235dc417bc2493da8394df7/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e0c8e31cfcc4592cb200160344b2fb6ae0f9e4effe06c644b5a125d4ae5ebe23", size = 1716987, upload-time = "2025-10-28T20:57:30.233Z" }, - { url = "https://files.pythonhosted.org/packages/5d/8b/f5bd1a75003daed099baec373aed678f2e9b34f2ad40d85baa1368556396/aiohttp-3.13.2-cp313-cp313-win32.whl", hash = "sha256:0740f31a60848d6edb296a0df827473eede90c689b8f9f2a4cdde74889eb2254", size = 425859, upload-time = "2025-10-28T20:57:32.105Z" }, - { url = "https://files.pythonhosted.org/packages/5d/28/a8a9fc6957b2cee8902414e41816b5ab5536ecf43c3b1843c10e82c559b2/aiohttp-3.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:a88d13e7ca367394908f8a276b89d04a3652044612b9a408a0bb22a5ed976a1a", size = 452192, upload-time = "2025-10-28T20:57:34.166Z" }, - { url = "https://files.pythonhosted.org/packages/9b/36/e2abae1bd815f01c957cbf7be817b3043304e1c87bad526292a0410fdcf9/aiohttp-3.13.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:2475391c29230e063ef53a66669b7b691c9bfc3f1426a0f7bcdf1216bdbac38b", size = 735234, upload-time = "2025-10-28T20:57:36.415Z" }, - { url = "https://files.pythonhosted.org/packages/ca/e3/1ee62dde9b335e4ed41db6bba02613295a0d5b41f74a783c142745a12763/aiohttp-3.13.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:f33c8748abef4d8717bb20e8fb1b3e07c6adacb7fd6beaae971a764cf5f30d61", size = 490733, upload-time = "2025-10-28T20:57:38.205Z" }, - { url = "https://files.pythonhosted.org/packages/1a/aa/7a451b1d6a04e8d15a362af3e9b897de71d86feac3babf8894545d08d537/aiohttp-3.13.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ae32f24bbfb7dbb485a24b30b1149e2f200be94777232aeadba3eecece4d0aa4", size = 491303, upload-time = "2025-10-28T20:57:40.122Z" }, - { url = 
"https://files.pythonhosted.org/packages/57/1e/209958dbb9b01174870f6a7538cd1f3f28274fdbc88a750c238e2c456295/aiohttp-3.13.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d7f02042c1f009ffb70067326ef183a047425bb2ff3bc434ead4dd4a4a66a2b", size = 1717965, upload-time = "2025-10-28T20:57:42.28Z" }, - { url = "https://files.pythonhosted.org/packages/08/aa/6a01848d6432f241416bc4866cae8dc03f05a5a884d2311280f6a09c73d6/aiohttp-3.13.2-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:93655083005d71cd6c072cdab54c886e6570ad2c4592139c3fb967bfc19e4694", size = 1667221, upload-time = "2025-10-28T20:57:44.869Z" }, - { url = "https://files.pythonhosted.org/packages/87/4f/36c1992432d31bbc789fa0b93c768d2e9047ec8c7177e5cd84ea85155f36/aiohttp-3.13.2-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0db1e24b852f5f664cd728db140cf11ea0e82450471232a394b3d1a540b0f906", size = 1757178, upload-time = "2025-10-28T20:57:47.216Z" }, - { url = "https://files.pythonhosted.org/packages/ac/b4/8e940dfb03b7e0f68a82b88fd182b9be0a65cb3f35612fe38c038c3112cf/aiohttp-3.13.2-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b009194665bcd128e23eaddef362e745601afa4641930848af4c8559e88f18f9", size = 1838001, upload-time = "2025-10-28T20:57:49.337Z" }, - { url = "https://files.pythonhosted.org/packages/d7/ef/39f3448795499c440ab66084a9db7d20ca7662e94305f175a80f5b7e0072/aiohttp-3.13.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c038a8fdc8103cd51dbd986ecdce141473ffd9775a7a8057a6ed9c3653478011", size = 1716325, upload-time = "2025-10-28T20:57:51.327Z" }, - { url = "https://files.pythonhosted.org/packages/d7/51/b311500ffc860b181c05d91c59a1313bdd05c82960fdd4035a15740d431e/aiohttp-3.13.2-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:66bac29b95a00db411cd758fea0e4b9bdba6d549dfe333f9a945430f5f2cc5a6", size = 1547978, upload-time = "2025-10-28T20:57:53.554Z" }, - { url = "https://files.pythonhosted.org/packages/31/64/b9d733296ef79815226dab8c586ff9e3df41c6aff2e16c06697b2d2e6775/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4ebf9cfc9ba24a74cf0718f04aac2a3bbe745902cc7c5ebc55c0f3b5777ef213", size = 1682042, upload-time = "2025-10-28T20:57:55.617Z" }, - { url = "https://files.pythonhosted.org/packages/3f/30/43d3e0f9d6473a6db7d472104c4eff4417b1e9df01774cb930338806d36b/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:a4b88ebe35ce54205c7074f7302bd08a4cb83256a3e0870c72d6f68a3aaf8e49", size = 1680085, upload-time = "2025-10-28T20:57:57.59Z" }, - { url = "https://files.pythonhosted.org/packages/16/51/c709f352c911b1864cfd1087577760ced64b3e5bee2aa88b8c0c8e2e4972/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:98c4fb90bb82b70a4ed79ca35f656f4281885be076f3f970ce315402b53099ae", size = 1728238, upload-time = "2025-10-28T20:57:59.525Z" }, - { url = "https://files.pythonhosted.org/packages/19/e2/19bd4c547092b773caeb48ff5ae4b1ae86756a0ee76c16727fcfd281404b/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:ec7534e63ae0f3759df3a1ed4fa6bc8f75082a924b590619c0dd2f76d7043caa", size = 1544395, upload-time = "2025-10-28T20:58:01.914Z" }, - { url = "https://files.pythonhosted.org/packages/cf/87/860f2803b27dfc5ed7be532832a3498e4919da61299b4a1f8eb89b8ff44d/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5b927cf9b935a13e33644cbed6c8c4b2d0f25b713d838743f8fe7191b33829c4", size = 1742965, upload-time = "2025-10-28T20:58:03.972Z" }, - { url = "https://files.pythonhosted.org/packages/67/7f/db2fc7618925e8c7a601094d5cbe539f732df4fb570740be88ed9e40e99a/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:88d6c017966a78c5265d996c19cdb79235be5e6412268d7e2ce7dee339471b7a", size = 1697585, upload-time = 
"2025-10-28T20:58:06.189Z" }, - { url = "https://files.pythonhosted.org/packages/0c/07/9127916cb09bb38284db5036036042b7b2c514c8ebaeee79da550c43a6d6/aiohttp-3.13.2-cp314-cp314-win32.whl", hash = "sha256:f7c183e786e299b5d6c49fb43a769f8eb8e04a2726a2bd5887b98b5cc2d67940", size = 431621, upload-time = "2025-10-28T20:58:08.636Z" }, - { url = "https://files.pythonhosted.org/packages/fb/41/554a8a380df6d3a2bba8a7726429a23f4ac62aaf38de43bb6d6cde7b4d4d/aiohttp-3.13.2-cp314-cp314-win_amd64.whl", hash = "sha256:fe242cd381e0fb65758faf5ad96c2e460df6ee5b2de1072fe97e4127927e00b4", size = 457627, upload-time = "2025-10-28T20:58:11Z" }, - { url = "https://files.pythonhosted.org/packages/c7/8e/3824ef98c039d3951cb65b9205a96dd2b20f22241ee17d89c5701557c826/aiohttp-3.13.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:f10d9c0b0188fe85398c61147bbd2a657d616c876863bfeff43376e0e3134673", size = 767360, upload-time = "2025-10-28T20:58:13.358Z" }, - { url = "https://files.pythonhosted.org/packages/a4/0f/6a03e3fc7595421274fa34122c973bde2d89344f8a881b728fa8c774e4f1/aiohttp-3.13.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e7c952aefdf2460f4ae55c5e9c3e80aa72f706a6317e06020f80e96253b1accd", size = 504616, upload-time = "2025-10-28T20:58:15.339Z" }, - { url = "https://files.pythonhosted.org/packages/c6/aa/ed341b670f1bc8a6f2c6a718353d13b9546e2cef3544f573c6a1ff0da711/aiohttp-3.13.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c20423ce14771d98353d2e25e83591fa75dfa90a3c1848f3d7c68243b4fbded3", size = 509131, upload-time = "2025-10-28T20:58:17.693Z" }, - { url = "https://files.pythonhosted.org/packages/7f/f0/c68dac234189dae5c4bbccc0f96ce0cc16b76632cfc3a08fff180045cfa4/aiohttp-3.13.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e96eb1a34396e9430c19d8338d2ec33015e4a87ef2b4449db94c22412e25ccdf", size = 1864168, upload-time = "2025-10-28T20:58:20.113Z" }, - { url = 
"https://files.pythonhosted.org/packages/8f/65/75a9a76db8364b5d0e52a0c20eabc5d52297385d9af9c35335b924fafdee/aiohttp-3.13.2-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:23fb0783bc1a33640036465019d3bba069942616a6a2353c6907d7fe1ccdaf4e", size = 1719200, upload-time = "2025-10-28T20:58:22.583Z" }, - { url = "https://files.pythonhosted.org/packages/f5/55/8df2ed78d7f41d232f6bd3ff866b6f617026551aa1d07e2f03458f964575/aiohttp-3.13.2-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2e1a9bea6244a1d05a4e57c295d69e159a5c50d8ef16aa390948ee873478d9a5", size = 1843497, upload-time = "2025-10-28T20:58:24.672Z" }, - { url = "https://files.pythonhosted.org/packages/e9/e0/94d7215e405c5a02ccb6a35c7a3a6cfff242f457a00196496935f700cde5/aiohttp-3.13.2-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0a3d54e822688b56e9f6b5816fb3de3a3a64660efac64e4c2dc435230ad23bad", size = 1935703, upload-time = "2025-10-28T20:58:26.758Z" }, - { url = "https://files.pythonhosted.org/packages/0b/78/1eeb63c3f9b2d1015a4c02788fb543141aad0a03ae3f7a7b669b2483f8d4/aiohttp-3.13.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7a653d872afe9f33497215745da7a943d1dc15b728a9c8da1c3ac423af35178e", size = 1792738, upload-time = "2025-10-28T20:58:29.787Z" }, - { url = "https://files.pythonhosted.org/packages/41/75/aaf1eea4c188e51538c04cc568040e3082db263a57086ea74a7d38c39e42/aiohttp-3.13.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:56d36e80d2003fa3fc0207fac644216d8532e9504a785ef9a8fd013f84a42c61", size = 1624061, upload-time = "2025-10-28T20:58:32.529Z" }, - { url = "https://files.pythonhosted.org/packages/9b/c2/3b6034de81fbcc43de8aeb209073a2286dfb50b86e927b4efd81cf848197/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = 
"sha256:78cd586d8331fb8e241c2dd6b2f4061778cc69e150514b39a9e28dd050475661", size = 1789201, upload-time = "2025-10-28T20:58:34.618Z" }, - { url = "https://files.pythonhosted.org/packages/c9/38/c15dcf6d4d890217dae79d7213988f4e5fe6183d43893a9cf2fe9e84ca8d/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:20b10bbfbff766294fe99987f7bb3b74fdd2f1a2905f2562132641ad434dcf98", size = 1776868, upload-time = "2025-10-28T20:58:38.835Z" }, - { url = "https://files.pythonhosted.org/packages/04/75/f74fd178ac81adf4f283a74847807ade5150e48feda6aef024403716c30c/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9ec49dff7e2b3c85cdeaa412e9d438f0ecd71676fde61ec57027dd392f00c693", size = 1790660, upload-time = "2025-10-28T20:58:41.507Z" }, - { url = "https://files.pythonhosted.org/packages/e7/80/7368bd0d06b16b3aba358c16b919e9c46cf11587dc572091031b0e9e3ef0/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:94f05348c4406450f9d73d38efb41d669ad6cd90c7ee194810d0eefbfa875a7a", size = 1617548, upload-time = "2025-10-28T20:58:43.674Z" }, - { url = "https://files.pythonhosted.org/packages/7d/4b/a6212790c50483cb3212e507378fbe26b5086d73941e1ec4b56a30439688/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:fa4dcb605c6f82a80c7f95713c2b11c3b8e9893b3ebd2bc9bde93165ed6107be", size = 1817240, upload-time = "2025-10-28T20:58:45.787Z" }, - { url = "https://files.pythonhosted.org/packages/ff/f7/ba5f0ba4ea8d8f3c32850912944532b933acbf0f3a75546b89269b9b7dde/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cf00e5db968c3f67eccd2778574cf64d8b27d95b237770aa32400bd7a1ca4f6c", size = 1762334, upload-time = "2025-10-28T20:58:47.936Z" }, - { url = "https://files.pythonhosted.org/packages/7e/83/1a5a1856574588b1cad63609ea9ad75b32a8353ac995d830bf5da9357364/aiohttp-3.13.2-cp314-cp314t-win32.whl", hash = "sha256:d23b5fe492b0805a50d3371e8a728a9134d8de5447dce4c885f5587294750734", size = 464685, upload-time = 
"2025-10-28T20:58:50.642Z" }, - { url = "https://files.pythonhosted.org/packages/9f/4d/d22668674122c08f4d56972297c51a624e64b3ed1efaa40187607a7cb66e/aiohttp-3.13.2-cp314-cp314t-win_amd64.whl", hash = "sha256:ff0a7b0a82a7ab905cbda74006318d1b12e37c797eb1b0d4eb3e316cf47f658f", size = 498093, upload-time = "2025-10-28T20:58:52.782Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/50/42/32cf8e7704ceb4481406eb87161349abb46a57fee3f008ba9cb610968646/aiohttp-3.13.3.tar.gz", hash = "sha256:a949eee43d3782f2daae4f4a2819b2cb9b0c5d3b7f7a927067cc84dafdbb9f88", size = 7844556, upload-time = "2026-01-03T17:33:05.204Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/4c/a164164834f03924d9a29dc3acd9e7ee58f95857e0b467f6d04298594ebb/aiohttp-3.13.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5b6073099fb654e0a068ae678b10feff95c5cae95bbfcbfa7af669d361a8aa6b", size = 746051, upload-time = "2026-01-03T17:29:43.287Z" }, + { url = "https://files.pythonhosted.org/packages/82/71/d5c31390d18d4f58115037c432b7e0348c60f6f53b727cad33172144a112/aiohttp-3.13.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cb93e166e6c28716c8c6aeb5f99dfb6d5ccf482d29fe9bf9a794110e6d0ab64", size = 499234, upload-time = "2026-01-03T17:29:44.822Z" }, + { url = "https://files.pythonhosted.org/packages/0e/c9/741f8ac91e14b1d2e7100690425a5b2b919a87a5075406582991fb7de920/aiohttp-3.13.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:28e027cf2f6b641693a09f631759b4d9ce9165099d2b5d92af9bd4e197690eea", size = 494979, upload-time = "2026-01-03T17:29:46.405Z" }, + { url = "https://files.pythonhosted.org/packages/75/b5/31d4d2e802dfd59f74ed47eba48869c1c21552c586d5e81a9d0d5c2ad640/aiohttp-3.13.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3b61b7169ababd7802f9568ed96142616a9118dd2be0d1866e920e77ec8fa92a", size = 1748297, upload-time = "2026-01-03T17:29:48.083Z" }, + { url = 
"https://files.pythonhosted.org/packages/1a/3e/eefad0ad42959f226bb79664826883f2687d602a9ae2941a18e0484a74d3/aiohttp-3.13.3-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:80dd4c21b0f6237676449c6baaa1039abae86b91636b6c91a7f8e61c87f89540", size = 1707172, upload-time = "2026-01-03T17:29:49.648Z" }, + { url = "https://files.pythonhosted.org/packages/c5/3a/54a64299fac2891c346cdcf2aa6803f994a2e4beeaf2e5a09dcc54acc842/aiohttp-3.13.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:65d2ccb7eabee90ce0503c17716fc77226be026dcc3e65cce859a30db715025b", size = 1805405, upload-time = "2026-01-03T17:29:51.244Z" }, + { url = "https://files.pythonhosted.org/packages/6c/70/ddc1b7169cf64075e864f64595a14b147a895a868394a48f6a8031979038/aiohttp-3.13.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5b179331a481cb5529fca8b432d8d3c7001cb217513c94cd72d668d1248688a3", size = 1899449, upload-time = "2026-01-03T17:29:53.938Z" }, + { url = "https://files.pythonhosted.org/packages/a1/7e/6815aab7d3a56610891c76ef79095677b8b5be6646aaf00f69b221765021/aiohttp-3.13.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d4c940f02f49483b18b079d1c27ab948721852b281f8b015c058100e9421dd1", size = 1748444, upload-time = "2026-01-03T17:29:55.484Z" }, + { url = "https://files.pythonhosted.org/packages/6b/f2/073b145c4100da5511f457dc0f7558e99b2987cf72600d42b559db856fbc/aiohttp-3.13.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f9444f105664c4ce47a2a7171a2418bce5b7bae45fb610f4e2c36045d85911d3", size = 1606038, upload-time = "2026-01-03T17:29:57.179Z" }, + { url = "https://files.pythonhosted.org/packages/0a/c1/778d011920cae03ae01424ec202c513dc69243cf2db303965615b81deeea/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:694976222c711d1d00ba131904beb60534f93966562f64440d0c9d41b8cdb440", size = 1724156, upload-time = "2026-01-03T17:29:58.914Z" }, + { url = "https://files.pythonhosted.org/packages/0e/cb/3419eabf4ec1e9ec6f242c32b689248365a1cf621891f6f0386632525494/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:f33ed1a2bf1997a36661874b017f5c4b760f41266341af36febaf271d179f6d7", size = 1722340, upload-time = "2026-01-03T17:30:01.962Z" }, + { url = "https://files.pythonhosted.org/packages/7a/e5/76cf77bdbc435bf233c1f114edad39ed4177ccbfab7c329482b179cff4f4/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e636b3c5f61da31a92bf0d91da83e58fdfa96f178ba682f11d24f31944cdd28c", size = 1783041, upload-time = "2026-01-03T17:30:03.609Z" }, + { url = "https://files.pythonhosted.org/packages/9d/d4/dd1ca234c794fd29c057ce8c0566b8ef7fd6a51069de5f06fa84b9a1971c/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:5d2d94f1f5fcbe40838ac51a6ab5704a6f9ea42e72ceda48de5e6b898521da51", size = 1596024, upload-time = "2026-01-03T17:30:05.132Z" }, + { url = "https://files.pythonhosted.org/packages/55/58/4345b5f26661a6180afa686c473620c30a66afdf120ed3dd545bbc809e85/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2be0e9ccf23e8a94f6f0650ce06042cefc6ac703d0d7ab6c7a917289f2539ad4", size = 1804590, upload-time = "2026-01-03T17:30:07.135Z" }, + { url = "https://files.pythonhosted.org/packages/7b/06/05950619af6c2df7e0a431d889ba2813c9f0129cec76f663e547a5ad56f2/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9af5e68ee47d6534d36791bbe9b646d2a7c7deb6fc24d7943628edfbb3581f29", size = 1740355, upload-time = "2026-01-03T17:30:09.083Z" }, + { url = "https://files.pythonhosted.org/packages/3e/80/958f16de79ba0422d7c1e284b2abd0c84bc03394fbe631d0a39ffa10e1eb/aiohttp-3.13.3-cp311-cp311-win32.whl", hash = "sha256:a2212ad43c0833a873d0fb3c63fa1bacedd4cf6af2fee62bf4b739ceec3ab239", size = 433701, upload-time = 
"2026-01-03T17:30:10.869Z" }, + { url = "https://files.pythonhosted.org/packages/dc/f2/27cdf04c9851712d6c1b99df6821a6623c3c9e55956d4b1e318c337b5a48/aiohttp-3.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:642f752c3eb117b105acbd87e2c143de710987e09860d674e068c4c2c441034f", size = 457678, upload-time = "2026-01-03T17:30:12.719Z" }, + { url = "https://files.pythonhosted.org/packages/a0/be/4fc11f202955a69e0db803a12a062b8379c970c7c84f4882b6da17337cc1/aiohttp-3.13.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b903a4dfee7d347e2d87697d0713be59e0b87925be030c9178c5faa58ea58d5c", size = 739732, upload-time = "2026-01-03T17:30:14.23Z" }, + { url = "https://files.pythonhosted.org/packages/97/2c/621d5b851f94fa0bb7430d6089b3aa970a9d9b75196bc93bb624b0db237a/aiohttp-3.13.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a45530014d7a1e09f4a55f4f43097ba0fd155089372e105e4bff4ca76cb1b168", size = 494293, upload-time = "2026-01-03T17:30:15.96Z" }, + { url = "https://files.pythonhosted.org/packages/5d/43/4be01406b78e1be8320bb8316dc9c42dbab553d281c40364e0f862d5661c/aiohttp-3.13.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:27234ef6d85c914f9efeb77ff616dbf4ad2380be0cda40b4db086ffc7ddd1b7d", size = 493533, upload-time = "2026-01-03T17:30:17.431Z" }, + { url = "https://files.pythonhosted.org/packages/8d/a8/5a35dc56a06a2c90d4742cbf35294396907027f80eea696637945a106f25/aiohttp-3.13.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d32764c6c9aafb7fb55366a224756387cd50bfa720f32b88e0e6fa45b27dcf29", size = 1737839, upload-time = "2026-01-03T17:30:19.422Z" }, + { url = "https://files.pythonhosted.org/packages/bf/62/4b9eeb331da56530bf2e198a297e5303e1c1ebdceeb00fe9b568a65c5a0c/aiohttp-3.13.3-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b1a6102b4d3ebc07dad44fbf07b45bb600300f15b552ddf1851b5390202ea2e3", size = 1703932, upload-time = "2026-01-03T17:30:21.756Z" }, + { url = 
"https://files.pythonhosted.org/packages/7c/f6/af16887b5d419e6a367095994c0b1332d154f647e7dc2bd50e61876e8e3d/aiohttp-3.13.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c014c7ea7fb775dd015b2d3137378b7be0249a448a1612268b5a90c2d81de04d", size = 1771906, upload-time = "2026-01-03T17:30:23.932Z" }, + { url = "https://files.pythonhosted.org/packages/ce/83/397c634b1bcc24292fa1e0c7822800f9f6569e32934bdeef09dae7992dfb/aiohttp-3.13.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2b8d8ddba8f95ba17582226f80e2de99c7a7948e66490ef8d947e272a93e9463", size = 1871020, upload-time = "2026-01-03T17:30:26Z" }, + { url = "https://files.pythonhosted.org/packages/86/f6/a62cbbf13f0ac80a70f71b1672feba90fdb21fd7abd8dbf25c0105fb6fa3/aiohttp-3.13.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ae8dd55c8e6c4257eae3a20fd2c8f41edaea5992ed67156642493b8daf3cecc", size = 1755181, upload-time = "2026-01-03T17:30:27.554Z" }, + { url = "https://files.pythonhosted.org/packages/0a/87/20a35ad487efdd3fba93d5843efdfaa62d2f1479eaafa7453398a44faf13/aiohttp-3.13.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:01ad2529d4b5035578f5081606a465f3b814c542882804e2e8cda61adf5c71bf", size = 1561794, upload-time = "2026-01-03T17:30:29.254Z" }, + { url = "https://files.pythonhosted.org/packages/de/95/8fd69a66682012f6716e1bc09ef8a1a2a91922c5725cb904689f112309c4/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bb4f7475e359992b580559e008c598091c45b5088f28614e855e42d39c2f1033", size = 1697900, upload-time = "2026-01-03T17:30:31.033Z" }, + { url = "https://files.pythonhosted.org/packages/e5/66/7b94b3b5ba70e955ff597672dad1691333080e37f50280178967aff68657/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c19b90316ad3b24c69cd78d5c9b4f3aa4497643685901185b65166293d36a00f", size = 1728239, upload-time = 
"2026-01-03T17:30:32.703Z" }, + { url = "https://files.pythonhosted.org/packages/47/71/6f72f77f9f7d74719692ab65a2a0252584bf8d5f301e2ecb4c0da734530a/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:96d604498a7c782cb15a51c406acaea70d8c027ee6b90c569baa6e7b93073679", size = 1740527, upload-time = "2026-01-03T17:30:34.695Z" }, + { url = "https://files.pythonhosted.org/packages/fa/b4/75ec16cbbd5c01bdaf4a05b19e103e78d7ce1ef7c80867eb0ace42ff4488/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:084911a532763e9d3dd95adf78a78f4096cd5f58cdc18e6fdbc1b58417a45423", size = 1554489, upload-time = "2026-01-03T17:30:36.864Z" }, + { url = "https://files.pythonhosted.org/packages/52/8f/bc518c0eea29f8406dcf7ed1f96c9b48e3bc3995a96159b3fc11f9e08321/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7a4a94eb787e606d0a09404b9c38c113d3b099d508021faa615d70a0131907ce", size = 1767852, upload-time = "2026-01-03T17:30:39.433Z" }, + { url = "https://files.pythonhosted.org/packages/9d/f2/a07a75173124f31f11ea6f863dc44e6f09afe2bca45dd4e64979490deab1/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:87797e645d9d8e222e04160ee32aa06bc5c163e8499f24db719e7852ec23093a", size = 1722379, upload-time = "2026-01-03T17:30:41.081Z" }, + { url = "https://files.pythonhosted.org/packages/3c/4a/1a3fee7c21350cac78e5c5cef711bac1b94feca07399f3d406972e2d8fcd/aiohttp-3.13.3-cp312-cp312-win32.whl", hash = "sha256:b04be762396457bef43f3597c991e192ee7da460a4953d7e647ee4b1c28e7046", size = 428253, upload-time = "2026-01-03T17:30:42.644Z" }, + { url = "https://files.pythonhosted.org/packages/d9/b7/76175c7cb4eb73d91ad63c34e29fc4f77c9386bba4a65b53ba8e05ee3c39/aiohttp-3.13.3-cp312-cp312-win_amd64.whl", hash = "sha256:e3531d63d3bdfa7e3ac5e9b27b2dd7ec9df3206a98e0b3445fa906f233264c57", size = 455407, upload-time = "2026-01-03T17:30:44.195Z" }, + { url = 
"https://files.pythonhosted.org/packages/97/8a/12ca489246ca1faaf5432844adbfce7ff2cc4997733e0af120869345643a/aiohttp-3.13.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5dff64413671b0d3e7d5918ea490bdccb97a4ad29b3f311ed423200b2203e01c", size = 734190, upload-time = "2026-01-03T17:30:45.832Z" }, + { url = "https://files.pythonhosted.org/packages/32/08/de43984c74ed1fca5c014808963cc83cb00d7bb06af228f132d33862ca76/aiohttp-3.13.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:87b9aab6d6ed88235aa2970294f496ff1a1f9adcd724d800e9b952395a80ffd9", size = 491783, upload-time = "2026-01-03T17:30:47.466Z" }, + { url = "https://files.pythonhosted.org/packages/17/f8/8dd2cf6112a5a76f81f81a5130c57ca829d101ad583ce57f889179accdda/aiohttp-3.13.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:425c126c0dc43861e22cb1c14ba4c8e45d09516d0a3ae0a3f7494b79f5f233a3", size = 490704, upload-time = "2026-01-03T17:30:49.373Z" }, + { url = "https://files.pythonhosted.org/packages/6d/40/a46b03ca03936f832bc7eaa47cfbb1ad012ba1be4790122ee4f4f8cba074/aiohttp-3.13.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f9120f7093c2a32d9647abcaf21e6ad275b4fbec5b55969f978b1a97c7c86bf", size = 1720652, upload-time = "2026-01-03T17:30:50.974Z" }, + { url = "https://files.pythonhosted.org/packages/f7/7e/917fe18e3607af92657e4285498f500dca797ff8c918bd7d90b05abf6c2a/aiohttp-3.13.3-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:697753042d57f4bf7122cab985bf15d0cef23c770864580f5af4f52023a56bd6", size = 1692014, upload-time = "2026-01-03T17:30:52.729Z" }, + { url = "https://files.pythonhosted.org/packages/71/b6/cefa4cbc00d315d68973b671cf105b21a609c12b82d52e5d0c9ae61d2a09/aiohttp-3.13.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6de499a1a44e7de70735d0b39f67c8f25eb3d91eb3103be99ca0fa882cdd987d", size = 1759777, upload-time = 
"2026-01-03T17:30:54.537Z" }, + { url = "https://files.pythonhosted.org/packages/fb/e3/e06ee07b45e59e6d81498b591fc589629be1553abb2a82ce33efe2a7b068/aiohttp-3.13.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:37239e9f9a7ea9ac5bf6b92b0260b01f8a22281996da609206a84df860bc1261", size = 1861276, upload-time = "2026-01-03T17:30:56.512Z" }, + { url = "https://files.pythonhosted.org/packages/7c/24/75d274228acf35ceeb2850b8ce04de9dd7355ff7a0b49d607ee60c29c518/aiohttp-3.13.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f76c1e3fe7d7c8afad7ed193f89a292e1999608170dcc9751a7462a87dfd5bc0", size = 1743131, upload-time = "2026-01-03T17:30:58.256Z" }, + { url = "https://files.pythonhosted.org/packages/04/98/3d21dde21889b17ca2eea54fdcff21b27b93f45b7bb94ca029c31ab59dc3/aiohttp-3.13.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fc290605db2a917f6e81b0e1e0796469871f5af381ce15c604a3c5c7e51cb730", size = 1556863, upload-time = "2026-01-03T17:31:00.445Z" }, + { url = "https://files.pythonhosted.org/packages/9e/84/da0c3ab1192eaf64782b03971ab4055b475d0db07b17eff925e8c93b3aa5/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4021b51936308aeea0367b8f006dc999ca02bc118a0cc78c303f50a2ff6afb91", size = 1682793, upload-time = "2026-01-03T17:31:03.024Z" }, + { url = "https://files.pythonhosted.org/packages/ff/0f/5802ada182f575afa02cbd0ec5180d7e13a402afb7c2c03a9aa5e5d49060/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:49a03727c1bba9a97d3e93c9f93ca03a57300f484b6e935463099841261195d3", size = 1716676, upload-time = "2026-01-03T17:31:04.842Z" }, + { url = "https://files.pythonhosted.org/packages/3f/8c/714d53bd8b5a4560667f7bbbb06b20c2382f9c7847d198370ec6526af39c/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3d9908a48eb7416dc1f4524e69f1d32e5d90e3981e4e37eb0aa1cd18f9cfa2a4", size = 1733217, upload-time = 
"2026-01-03T17:31:06.868Z" }, + { url = "https://files.pythonhosted.org/packages/7d/79/e2176f46d2e963facea939f5be2d26368ce543622be6f00a12844d3c991f/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:2712039939ec963c237286113c68dbad80a82a4281543f3abf766d9d73228998", size = 1552303, upload-time = "2026-01-03T17:31:08.958Z" }, + { url = "https://files.pythonhosted.org/packages/ab/6a/28ed4dea1759916090587d1fe57087b03e6c784a642b85ef48217b0277ae/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:7bfdc049127717581866fa4708791220970ce291c23e28ccf3922c700740fdc0", size = 1763673, upload-time = "2026-01-03T17:31:10.676Z" }, + { url = "https://files.pythonhosted.org/packages/e8/35/4a3daeb8b9fab49240d21c04d50732313295e4bd813a465d840236dd0ce1/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8057c98e0c8472d8846b9c79f56766bcc57e3e8ac7bfd510482332366c56c591", size = 1721120, upload-time = "2026-01-03T17:31:12.575Z" }, + { url = "https://files.pythonhosted.org/packages/bc/9f/d643bb3c5fb99547323e635e251c609fbbc660d983144cfebec529e09264/aiohttp-3.13.3-cp313-cp313-win32.whl", hash = "sha256:1449ceddcdbcf2e0446957863af03ebaaa03f94c090f945411b61269e2cb5daf", size = 427383, upload-time = "2026-01-03T17:31:14.382Z" }, + { url = "https://files.pythonhosted.org/packages/4e/f1/ab0395f8a79933577cdd996dd2f9aa6014af9535f65dddcf88204682fe62/aiohttp-3.13.3-cp313-cp313-win_amd64.whl", hash = "sha256:693781c45a4033d31d4187d2436f5ac701e7bbfe5df40d917736108c1cc7436e", size = 453899, upload-time = "2026-01-03T17:31:15.958Z" }, + { url = "https://files.pythonhosted.org/packages/99/36/5b6514a9f5d66f4e2597e40dea2e3db271e023eb7a5d22defe96ba560996/aiohttp-3.13.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:ea37047c6b367fd4bd632bff8077449b8fa034b69e812a18e0132a00fae6e808", size = 737238, upload-time = "2026-01-03T17:31:17.909Z" }, + { url = 
"https://files.pythonhosted.org/packages/f7/49/459327f0d5bcd8c6c9ca69e60fdeebc3622861e696490d8674a6d0cb90a6/aiohttp-3.13.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6fc0e2337d1a4c3e6acafda6a78a39d4c14caea625124817420abceed36e2415", size = 492292, upload-time = "2026-01-03T17:31:19.919Z" }, + { url = "https://files.pythonhosted.org/packages/e8/0b/b97660c5fd05d3495b4eb27f2d0ef18dc1dc4eff7511a9bf371397ff0264/aiohttp-3.13.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c685f2d80bb67ca8c3837823ad76196b3694b0159d232206d1e461d3d434666f", size = 493021, upload-time = "2026-01-03T17:31:21.636Z" }, + { url = "https://files.pythonhosted.org/packages/54/d4/438efabdf74e30aeceb890c3290bbaa449780583b1270b00661126b8aae4/aiohttp-3.13.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48e377758516d262bde50c2584fc6c578af272559c409eecbdd2bae1601184d6", size = 1717263, upload-time = "2026-01-03T17:31:23.296Z" }, + { url = "https://files.pythonhosted.org/packages/71/f2/7bddc7fd612367d1459c5bcf598a9e8f7092d6580d98de0e057eb42697ad/aiohttp-3.13.3-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:34749271508078b261c4abb1767d42b8d0c0cc9449c73a4df494777dc55f0687", size = 1669107, upload-time = "2026-01-03T17:31:25.334Z" }, + { url = "https://files.pythonhosted.org/packages/00/5a/1aeaecca40e22560f97610a329e0e5efef5e0b5afdf9f857f0d93839ab2e/aiohttp-3.13.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:82611aeec80eb144416956ec85b6ca45a64d76429c1ed46ae1b5f86c6e0c9a26", size = 1760196, upload-time = "2026-01-03T17:31:27.394Z" }, + { url = "https://files.pythonhosted.org/packages/f8/f8/0ff6992bea7bd560fc510ea1c815f87eedd745fe035589c71ce05612a19a/aiohttp-3.13.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2fff83cfc93f18f215896e3a190e8e5cb413ce01553901aca925176e7568963a", size = 
1843591, upload-time = "2026-01-03T17:31:29.238Z" }, + { url = "https://files.pythonhosted.org/packages/e3/d1/e30e537a15f53485b61f5be525f2157da719819e8377298502aebac45536/aiohttp-3.13.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bbe7d4cecacb439e2e2a8a1a7b935c25b812af7a5fd26503a66dadf428e79ec1", size = 1720277, upload-time = "2026-01-03T17:31:31.053Z" }, + { url = "https://files.pythonhosted.org/packages/84/45/23f4c451d8192f553d38d838831ebbc156907ea6e05557f39563101b7717/aiohttp-3.13.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b928f30fe49574253644b1ca44b1b8adbd903aa0da4b9054a6c20fc7f4092a25", size = 1548575, upload-time = "2026-01-03T17:31:32.87Z" }, + { url = "https://files.pythonhosted.org/packages/6a/ed/0a42b127a43712eda7807e7892c083eadfaf8429ca8fb619662a530a3aab/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7b5e8fe4de30df199155baaf64f2fcd604f4c678ed20910db8e2c66dc4b11603", size = 1679455, upload-time = "2026-01-03T17:31:34.76Z" }, + { url = "https://files.pythonhosted.org/packages/2e/b5/c05f0c2b4b4fe2c9d55e73b6d3ed4fd6c9dc2684b1d81cbdf77e7fad9adb/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:8542f41a62bcc58fc7f11cf7c90e0ec324ce44950003feb70640fc2a9092c32a", size = 1687417, upload-time = "2026-01-03T17:31:36.699Z" }, + { url = "https://files.pythonhosted.org/packages/c9/6b/915bc5dad66aef602b9e459b5a973529304d4e89ca86999d9d75d80cbd0b/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:5e1d8c8b8f1d91cd08d8f4a3c2b067bfca6ec043d3ff36de0f3a715feeedf926", size = 1729968, upload-time = "2026-01-03T17:31:38.622Z" }, + { url = "https://files.pythonhosted.org/packages/11/3b/e84581290a9520024a08640b63d07673057aec5ca548177a82026187ba73/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:90455115e5da1c3c51ab619ac57f877da8fd6d73c05aacd125c5ae9819582aba", size = 1545690, upload-time = 
"2026-01-03T17:31:40.57Z" }, + { url = "https://files.pythonhosted.org/packages/f5/04/0c3655a566c43fd647c81b895dfe361b9f9ad6d58c19309d45cff52d6c3b/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:042e9e0bcb5fba81886c8b4fbb9a09d6b8a00245fd8d88e4d989c1f96c74164c", size = 1746390, upload-time = "2026-01-03T17:31:42.857Z" }, + { url = "https://files.pythonhosted.org/packages/1f/53/71165b26978f719c3419381514c9690bd5980e764a09440a10bb816ea4ab/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2eb752b102b12a76ca02dff751a801f028b4ffbbc478840b473597fc91a9ed43", size = 1702188, upload-time = "2026-01-03T17:31:44.984Z" }, + { url = "https://files.pythonhosted.org/packages/29/a7/cbe6c9e8e136314fa1980da388a59d2f35f35395948a08b6747baebb6aa6/aiohttp-3.13.3-cp314-cp314-win32.whl", hash = "sha256:b556c85915d8efaed322bf1bdae9486aa0f3f764195a0fb6ee962e5c71ef5ce1", size = 433126, upload-time = "2026-01-03T17:31:47.463Z" }, + { url = "https://files.pythonhosted.org/packages/de/56/982704adea7d3b16614fc5936014e9af85c0e34b58f9046655817f04306e/aiohttp-3.13.3-cp314-cp314-win_amd64.whl", hash = "sha256:9bf9f7a65e7aa20dd764151fb3d616c81088f91f8df39c3893a536e279b4b984", size = 459128, upload-time = "2026-01-03T17:31:49.2Z" }, + { url = "https://files.pythonhosted.org/packages/6c/2a/3c79b638a9c3d4658d345339d22070241ea341ed4e07b5ac60fb0f418003/aiohttp-3.13.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:05861afbbec40650d8a07ea324367cb93e9e8cc7762e04dd4405df99fa65159c", size = 769512, upload-time = "2026-01-03T17:31:51.134Z" }, + { url = "https://files.pythonhosted.org/packages/29/b9/3e5014d46c0ab0db8707e0ac2711ed28c4da0218c358a4e7c17bae0d8722/aiohttp-3.13.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2fc82186fadc4a8316768d61f3722c230e2c1dcab4200d52d2ebdf2482e47592", size = 506444, upload-time = "2026-01-03T17:31:52.85Z" }, + { url = 
"https://files.pythonhosted.org/packages/90/03/c1d4ef9a054e151cd7839cdc497f2638f00b93cbe8043983986630d7a80c/aiohttp-3.13.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0add0900ff220d1d5c5ebbf99ed88b0c1bbf87aa7e4262300ed1376a6b13414f", size = 510798, upload-time = "2026-01-03T17:31:54.91Z" }, + { url = "https://files.pythonhosted.org/packages/ea/76/8c1e5abbfe8e127c893fe7ead569148a4d5a799f7cf958d8c09f3eedf097/aiohttp-3.13.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:568f416a4072fbfae453dcf9a99194bbb8bdeab718e08ee13dfa2ba0e4bebf29", size = 1868835, upload-time = "2026-01-03T17:31:56.733Z" }, + { url = "https://files.pythonhosted.org/packages/8e/ac/984c5a6f74c363b01ff97adc96a3976d9c98940b8969a1881575b279ac5d/aiohttp-3.13.3-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:add1da70de90a2569c5e15249ff76a631ccacfe198375eead4aadf3b8dc849dc", size = 1720486, upload-time = "2026-01-03T17:31:58.65Z" }, + { url = "https://files.pythonhosted.org/packages/b2/9a/b7039c5f099c4eb632138728828b33428585031a1e658d693d41d07d89d1/aiohttp-3.13.3-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:10b47b7ba335d2e9b1239fa571131a87e2d8ec96b333e68b2a305e7a98b0bae2", size = 1847951, upload-time = "2026-01-03T17:32:00.989Z" }, + { url = "https://files.pythonhosted.org/packages/3c/02/3bec2b9a1ba3c19ff89a43a19324202b8eb187ca1e928d8bdac9bbdddebd/aiohttp-3.13.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3dd4dce1c718e38081c8f35f323209d4c1df7d4db4bab1b5c88a6b4d12b74587", size = 1941001, upload-time = "2026-01-03T17:32:03.122Z" }, + { url = "https://files.pythonhosted.org/packages/37/df/d879401cedeef27ac4717f6426c8c36c3091c6e9f08a9178cc87549c537f/aiohttp-3.13.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:34bac00a67a812570d4a460447e1e9e06fae622946955f939051e7cc895cfab8", size = 1797246, upload-time = "2026-01-03T17:32:05.255Z" }, + { url = "https://files.pythonhosted.org/packages/8d/15/be122de1f67e6953add23335c8ece6d314ab67c8bebb3f181063010795a7/aiohttp-3.13.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a19884d2ee70b06d9204b2727a7b9f983d0c684c650254679e716b0b77920632", size = 1627131, upload-time = "2026-01-03T17:32:07.607Z" }, + { url = "https://files.pythonhosted.org/packages/12/12/70eedcac9134cfa3219ab7af31ea56bc877395b1ac30d65b1bc4b27d0438/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ca7f2bb6ba8348a3614c7918cc4bb73268c5ac2a207576b7afea19d3d9f64", size = 1795196, upload-time = "2026-01-03T17:32:09.59Z" }, + { url = "https://files.pythonhosted.org/packages/32/11/b30e1b1cd1f3054af86ebe60df96989c6a414dd87e27ad16950eee420bea/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:b0d95340658b9d2f11d9697f59b3814a9d3bb4b7a7c20b131df4bcef464037c0", size = 1782841, upload-time = "2026-01-03T17:32:11.445Z" }, + { url = "https://files.pythonhosted.org/packages/88/0d/d98a9367b38912384a17e287850f5695c528cff0f14f791ce8ee2e4f7796/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:a1e53262fd202e4b40b70c3aff944a8155059beedc8a89bba9dc1f9ef06a1b56", size = 1795193, upload-time = "2026-01-03T17:32:13.705Z" }, + { url = "https://files.pythonhosted.org/packages/43/a5/a2dfd1f5ff5581632c7f6a30e1744deda03808974f94f6534241ef60c751/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:d60ac9663f44168038586cab2157e122e46bdef09e9368b37f2d82d354c23f72", size = 1621979, upload-time = "2026-01-03T17:32:15.965Z" }, + { url = "https://files.pythonhosted.org/packages/fa/f0/12973c382ae7c1cccbc4417e129c5bf54c374dfb85af70893646e1f0e749/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:90751b8eed69435bac9ff4e3d2f6b3af1f57e37ecb0fbeee59c0174c9e2d41df", size = 
1822193, upload-time = "2026-01-03T17:32:18.219Z" }, + { url = "https://files.pythonhosted.org/packages/3c/5f/24155e30ba7f8c96918af1350eb0663e2430aad9e001c0489d89cd708ab1/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:fc353029f176fd2b3ec6cfc71be166aba1936fe5d73dd1992ce289ca6647a9aa", size = 1769801, upload-time = "2026-01-03T17:32:20.25Z" }, + { url = "https://files.pythonhosted.org/packages/eb/f8/7314031ff5c10e6ece114da79b338ec17eeff3a079e53151f7e9f43c4723/aiohttp-3.13.3-cp314-cp314t-win32.whl", hash = "sha256:2e41b18a58da1e474a057b3d35248d8320029f61d70a37629535b16a0c8f3767", size = 466523, upload-time = "2026-01-03T17:32:22.215Z" }, + { url = "https://files.pythonhosted.org/packages/b4/63/278a98c715ae467624eafe375542d8ba9b4383a016df8fdefe0ae28382a7/aiohttp-3.13.3-cp314-cp314t-win_amd64.whl", hash = "sha256:44531a36aa2264a1860089ffd4dce7baf875ee5a6079d5fb42e261c704ef7344", size = 499694, upload-time = "2026-01-03T17:32:24.546Z" }, ] [[package]] @@ -145,7 +145,7 @@ wheels = [ [[package]] name = "anthropic" -version = "0.75.0" +version = "0.77.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -157,22 +157,22 @@ dependencies = [ { name = "sniffio" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/04/1f/08e95f4b7e2d35205ae5dcbb4ae97e7d477fc521c275c02609e2931ece2d/anthropic-0.75.0.tar.gz", hash = "sha256:e8607422f4ab616db2ea5baacc215dd5f028da99ce2f022e33c7c535b29f3dfb", size = 439565, upload-time = "2025-11-24T20:41:45.28Z" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/85/6cb5da3cf91de2eeea89726316e8c5c8c31e2d61ee7cb1233d7e95512c31/anthropic-0.77.0.tar.gz", hash = "sha256:ce36efeb80cb1e25430a88440dc0f9aa5c87f10d080ab70a1bdfd5c2c5fbedb4", size = 504575, upload-time = "2026-01-29T18:20:41.507Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/60/1c/1cd02b7ae64302a6e06724bf80a96401d5313708651d277b1458504a1730/anthropic-0.75.0-py3-none-any.whl", hash = "sha256:ea8317271b6c15d80225a9f3c670152746e88805a7a61e14d4a374577164965b", size = 388164, upload-time = "2025-11-24T20:41:43.587Z" }, + { url = "https://files.pythonhosted.org/packages/ac/27/9df785d3f94df9ac72f43ee9e14b8120b37d992b18f4952774ed46145022/anthropic-0.77.0-py3-none-any.whl", hash = "sha256:65cc83a3c82ce622d5c677d0d7706c77d29dc83958c6b10286e12fda6ffb2651", size = 397867, upload-time = "2026-01-29T18:20:39.481Z" }, ] [[package]] name = "anyio" -version = "4.12.0" +version = "4.12.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "idna" }, { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/16/ce/8a777047513153587e5434fd752e89334ac33e379aa3497db860eeb60377/anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0", size = 228266, upload-time = "2025-11-28T23:37:38.911Z" } +sdist = { url = "https://files.pythonhosted.org/packages/96/f0/5eb65b2bb0d09ac6776f2eb54adee6abe8228ea05b20a5ad0e4945de8aac/anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703", size = 228685, upload-time = "2026-01-06T11:45:21.246Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" }, + { url = "https://files.pythonhosted.org/packages/38/0e/27be9fdef66e72d64c0cdc3cc2823101b80585f8119b5c112c2e8f5f7dab/anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c", size = 113592, upload-time = "2026-01-06T11:45:19.497Z" }, ] [[package]] @@ 
-204,20 +204,20 @@ wheels = [ [[package]] name = "cachetools" -version = "6.2.2" +version = "7.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fb/44/ca1675be2a83aeee1886ab745b28cda92093066590233cc501890eb8417a/cachetools-6.2.2.tar.gz", hash = "sha256:8e6d266b25e539df852251cfd6f990b4bc3a141db73b939058d809ebd2590fc6", size = 31571, upload-time = "2025-11-13T17:42:51.465Z" } +sdist = { url = "https://files.pythonhosted.org/packages/98/af/df70e9b65bc77a1cbe0768c0aa4617147f30f8306ded98c1744bcdc0ae1e/cachetools-7.0.0.tar.gz", hash = "sha256:a9abf18ff3b86c7d05b27ead412e235e16ae045925e531fae38d5fada5ed5b08", size = 35796, upload-time = "2026-02-01T18:59:47.411Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e6/46/eb6eca305c77a4489affe1c5d8f4cae82f285d9addd8de4ec084a7184221/cachetools-6.2.2-py3-none-any.whl", hash = "sha256:6c09c98183bf58560c97b2abfcedcbaf6a896a490f534b031b661d3723b45ace", size = 11503, upload-time = "2025-11-13T17:42:50.232Z" }, + { url = "https://files.pythonhosted.org/packages/28/df/2dd32cce20cbcf6f2ec456b58d44368161ad28320729f64e5e1d5d7bd0ae/cachetools-7.0.0-py3-none-any.whl", hash = "sha256:d52fef60e6e964a1969cfb61ccf6242a801b432790fe520d78720d757c81cbd2", size = 13487, upload-time = "2026-02-01T18:59:45.981Z" }, ] [[package]] name = "certifi" -version = "2025.11.12" +version = "2026.1.4" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/2d/a891ca51311197f6ad14a7ef42e2399f36cf2f9bd44752b3dc4eab60fdc5/certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120", size = 154268, 
upload-time = "2026-01-04T02:42:41.825Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, + { url = "https://files.pythonhosted.org/packages/e6/ad/3cc14f097111b4de0040c83a525973216457bbeeb63739ef1ed275c1c021/certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c", size = 152900, upload-time = "2026-01-04T02:42:40.15Z" }, ] [[package]] @@ -365,21 +365,21 @@ wheels = [ [[package]] name = "chuk-ai-session-manager" -version = "0.7.1" +version = "0.8" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "chuk-sessions" }, { name = "chuk-tool-processor" }, { name = "pydantic" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/32/c5/93806ff3827ba1a01b792e367f54381e45f5f7c5d81b717d3b4761192a8d/chuk_ai_session_manager-0.7.1.tar.gz", hash = "sha256:21f73e34197f5195d584cf7385e818770b2fb26a815883813e5561e3a2be5f10", size = 75229, upload-time = "2025-06-23T09:04:27.032Z" } +sdist = { url = "https://files.pythonhosted.org/packages/23/3e/80fa3fbb444e8aa2b926d655282370b41be9f8af32bbbc84632e3fb9fc00/chuk_ai_session_manager-0.8.tar.gz", hash = "sha256:6c3ca0c2fe86099b445f358018d4a045fb46099c6c4bd07e9af42cd363bfbe10", size = 147225, upload-time = "2026-02-01T17:41:52.145Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/c9/cf472a0e4225d3f4104d1e78f91d307399dc8d29d56d46a9ccab256053dc/chuk_ai_session_manager-0.7.1-py3-none-any.whl", hash = "sha256:2df9396b45855835771bc18c6b6ad1e38dc21ef651a837c4822a902aecf61af7", size = 43326, upload-time = "2025-06-23T09:04:25.822Z" }, + { url = 
"https://files.pythonhosted.org/packages/c3/d9/0e940d7112d132d8f1dafbc8bf1c0619d7c581cf07347d677de1a6e0005f/chuk_ai_session_manager-0.8-py3-none-any.whl", hash = "sha256:78678af387495896484309b8371b0c4f48333cfa9ec0b16633b9c01ecb91a13d", size = 120245, upload-time = "2026-02-01T17:41:50.617Z" }, ] [[package]] name = "chuk-llm" -version = "0.14.3" +version = "0.17.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -400,23 +400,23 @@ dependencies = [ { name = "transformers" }, { name = "ujson" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/86/6c/26d60581db796ed8f5ce017fb56d999f570f86e1d25f9b6dea40708262e0/chuk_llm-0.14.3.tar.gz", hash = "sha256:0fb46994cc5c775346a32ef0dc00c37c4437919660ee67bf5fe67afe1c5b3a04", size = 298398, upload-time = "2025-11-24T23:13:31.05Z" } +sdist = { url = "https://files.pythonhosted.org/packages/06/82/b547c0317e895a42d9fdc13e0e1364492d757f434b0b2195eb6daea77ea9/chuk_llm-0.17.1.tar.gz", hash = "sha256:892a8f192d409827c8155796a89de84c09101a0f828cb6bed4afac95599107af", size = 301700, upload-time = "2025-12-14T15:29:05.973Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a7/56/519b8c668f934a6f006aa83f74bbabbedbc370fbb34c8d49d6325c5a2a3c/chuk_llm-0.14.3-py3-none-any.whl", hash = "sha256:b2b0cc464656636795c87cf49e98a78a16bb3920b664721b7ea1851357f3af51", size = 322341, upload-time = "2025-11-24T23:13:29.31Z" }, + { url = "https://files.pythonhosted.org/packages/9a/cc/16bea0206c3d446a7b4c7b3e28266fa4ba5663e0e912623c188be18b3909/chuk_llm-0.17.1-py3-none-any.whl", hash = "sha256:bda7a050206f5708a27d2d7d0d24e98833d2fce029584fe29c876ea611d501f3", size = 324791, upload-time = "2025-12-14T15:29:03.493Z" }, ] [[package]] name = "chuk-mcp" -version = "0.9" +version = "0.9.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, { name = "httpx" }, { name = "typing-extensions" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/d4/0f/b7f958e95d4c4313b6fcbdd7a7bd0212f61d32345b302c522d2a20fc6482/chuk_mcp-0.9.tar.gz", hash = "sha256:1544f85aa35d60c8f6a5017ec8f1ef659be238da4d1f21fcb4495ba87c91df31", size = 156868, upload-time = "2025-11-19T00:52:44.941Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a4/e9/68d7b5f3f99461580e0ef3c862b37c351be98e60e968fda5cf85939e2845/chuk_mcp-0.9.1.tar.gz", hash = "sha256:f318e4f06047784553c97763ede2172d82b12b74c18d20ca7a0cffea0b67d4f8", size = 158404, upload-time = "2025-12-07T10:34:09.162Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1b/ca/7d3381df501c6636a0eaa1f1ae3ff82b3a0f80ef830b8b0a7b3915617bc6/chuk_mcp-0.9-py3-none-any.whl", hash = "sha256:ea821711741db3802e25ad8c183d9933a267ca40a9ef176a2236e17a4ea06666", size = 140648, upload-time = "2025-11-19T00:52:43.527Z" }, + { url = "https://files.pythonhosted.org/packages/65/0b/633f6ab1962f8a11888ad4e6d7e5474ec75d61c8645c5a01db9ee8a5c156/chuk_mcp-0.9.1-py3-none-any.whl", hash = "sha256:aeac7dd9842467d7d99136c3ce99fe69013aa5d73e707456c30ca1fe9676cd8b", size = 141052, upload-time = "2025-12-07T10:34:07.517Z" }, ] [[package]] @@ -436,34 +436,34 @@ wheels = [ [[package]] name = "chuk-sessions" -version = "0.5.2" +version = "0.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "csrf" }, { name = "pydantic" }, { name = "pyyaml" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/6c/03/ee3dbdd3adcc306543f22e5a5f82dff66644c72effd8b5b6fcd70065be52/chuk_sessions-0.5.2.tar.gz", hash = "sha256:b250e1f0e55e87f992da8badf623ed5e46aa8140c462f7724fe931bf7ad6e556", size = 38840, upload-time = "2025-11-17T17:51:02.904Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e6/07/9d69646dc235b1dc11fe4583b6af2a0a85c497e1297749689cbc6322bc48/chuk_sessions-0.6.tar.gz", hash = "sha256:4c6b3893295f8748de7b4a869ea25806cd48bc3749135a292a980ccb2e5d20af", size = 40222, upload-time = "2026-01-07T16:34:49.427Z" } wheels = [ 
- { url = "https://files.pythonhosted.org/packages/c7/e3/cdfe7e1da63668fd7121e3cddb6510a64ac0ac5d38471c3cdcade251872b/chuk_sessions-0.5.2-py3-none-any.whl", hash = "sha256:deb21d28ec36591ba79e804a06c8daa1246fa417ca22827d7035d97eb05bfd81", size = 31802, upload-time = "2025-11-17T17:51:01.753Z" }, + { url = "https://files.pythonhosted.org/packages/cc/0c/f39bd7906d0e12856930ecb52593931681894d961ee4859fe674113d86d5/chuk_sessions-0.6-py3-none-any.whl", hash = "sha256:5578ade0981eb795e62416fab22082bdeaa3dea0631a577239278f1b851c5ad0", size = 33040, upload-time = "2026-01-07T16:34:48.17Z" }, ] [[package]] name = "chuk-term" -version = "0.1.3" +version = "0.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, { name = "rich" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/2d/e6/dbc5f4db978b9a5cc8926550a392cb8380c7688e1af94c311deafbe63452/chuk_term-0.1.3.tar.gz", hash = "sha256:5fbdfa06f7ed92589a606b1ba24dc97ee7305f2c9809228815065101e12b3513", size = 162500, upload-time = "2025-09-07T13:06:47.402Z" } +sdist = { url = "https://files.pythonhosted.org/packages/60/9d/b764eb12cee770051f232f4a72421c233b7ffcfe21d8cc45ff176a46d10e/chuk_term-0.3.tar.gz", hash = "sha256:067aa20a95aea20992cea4dcbcd94e5b04b9daa728d484aca0aa25b1f6f3b05e", size = 195821, upload-time = "2025-12-12T02:01:14.745Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d2/e0/a32960581e921e4e0713e55433a753e63b5a903c30d178ba421ff2ff6a2c/chuk_term-0.1.3-py3-none-any.whl", hash = "sha256:b18c2e44de6a4ff37f23c0bd0162e7215781fbd48ac3a624edbb8d3b2cc6a700", size = 41512, upload-time = "2025-09-07T13:06:45.926Z" }, + { url = "https://files.pythonhosted.org/packages/e8/e7/15f3f5cfa6587f422151b92d29b9adaa560681d10f84bb66ba17ee19ab1e/chuk_term-0.3-py3-none-any.whl", hash = "sha256:1ca06b2e5f7d1cbeb27fce3ff6a1d448a8cbaa02aa6757dbc56a40eb2938f712", size = 45775, upload-time = "2025-12-12T02:01:13.492Z" }, ] [[package]] name = "chuk-tool-processor" -version = 
"0.11.2" +version = "0.19" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "chuk-mcp" }, @@ -472,9 +472,9 @@ dependencies = [ { name = "pydantic" }, { name = "uuid" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e7/c8/849e496ae545f68364c50e273b343718c92f014b0df83585b825b8841be4/chuk_tool_processor-0.11.2.tar.gz", hash = "sha256:e8292356b8a2eb1c376f64c086ffae5b9d3ba7d9df3776fb14be276bb0750201", size = 189074, upload-time = "2025-11-20T16:29:16.052Z" } +sdist = { url = "https://files.pythonhosted.org/packages/61/7c/6ac59f37ab45ea807b75330d0fe01af839b565d2fb600859861f18ea3772/chuk_tool_processor-0.19.tar.gz", hash = "sha256:4fa11f9de46cf59def042b39d8c16d60fb15d025433ae9e29d214835253b0347", size = 271352, upload-time = "2025-12-16T16:45:44.649Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/0b/57/844eb04c7337ed61dc16b37da7b381c04cdf256873683d68a5e62c8b2cd4/chuk_tool_processor-0.11.2-py3-none-any.whl", hash = "sha256:fc5e127ea111d8fce449696561e14d070a459305985435e9d09c8fbf7592d033", size = 175082, upload-time = "2025-11-20T16:29:14.276Z" }, + { url = "https://files.pythonhosted.org/packages/65/a7/d42185df660fa1233740f2e2c9a4c026b4118797a6d92193dd537cb0f7b6/chuk_tool_processor-0.19-py3-none-any.whl", hash = "sha256:3f61e6a81fe0fa73134d8fd8404bc7fd37ac6412cd154c4e93836e9e9ee370c5", size = 323285, upload-time = "2025-12-16T16:45:41.82Z" }, ] [[package]] @@ -500,89 +500,89 @@ wheels = [ [[package]] name = "coverage" -version = "7.12.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/89/26/4a96807b193b011588099c3b5c89fbb05294e5b90e71018e065465f34eb6/coverage-7.12.0.tar.gz", hash = "sha256:fc11e0a4e372cb5f282f16ef90d4a585034050ccda536451901abfb19a57f40c", size = 819341, upload-time = "2025-11-18T13:34:20.766Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/5a/0c/0dfe7f0487477d96432e4815537263363fb6dd7289743a796e8e51eabdf2/coverage-7.12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aa124a3683d2af98bd9d9c2bfa7a5076ca7e5ab09fdb96b81fa7d89376ae928f", size = 217535, upload-time = "2025-11-18T13:32:08.812Z" }, - { url = "https://files.pythonhosted.org/packages/9b/f5/f9a4a053a5bbff023d3bec259faac8f11a1e5a6479c2ccf586f910d8dac7/coverage-7.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d93fbf446c31c0140208dcd07c5d882029832e8ed7891a39d6d44bd65f2316c3", size = 218044, upload-time = "2025-11-18T13:32:10.329Z" }, - { url = "https://files.pythonhosted.org/packages/95/c5/84fc3697c1fa10cd8571919bf9693f693b7373278daaf3b73e328d502bc8/coverage-7.12.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:52ca620260bd8cd6027317bdd8b8ba929be1d741764ee765b42c4d79a408601e", size = 248440, upload-time = "2025-11-18T13:32:12.536Z" }, - { url = "https://files.pythonhosted.org/packages/f4/36/2d93fbf6a04670f3874aed397d5a5371948a076e3249244a9e84fb0e02d6/coverage-7.12.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f3433ffd541380f3a0e423cff0f4926d55b0cc8c1d160fdc3be24a4c03aa65f7", size = 250361, upload-time = "2025-11-18T13:32:13.852Z" }, - { url = "https://files.pythonhosted.org/packages/5d/49/66dc65cc456a6bfc41ea3d0758c4afeaa4068a2b2931bf83be6894cf1058/coverage-7.12.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f7bbb321d4adc9f65e402c677cd1c8e4c2d0105d3ce285b51b4d87f1d5db5245", size = 252472, upload-time = "2025-11-18T13:32:15.068Z" }, - { url = "https://files.pythonhosted.org/packages/35/1f/ebb8a18dffd406db9fcd4b3ae42254aedcaf612470e8712f12041325930f/coverage-7.12.0-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:22a7aade354a72dff3b59c577bfd18d6945c61f97393bc5fb7bd293a4237024b", size = 248592, upload-time = 
"2025-11-18T13:32:16.328Z" }, - { url = "https://files.pythonhosted.org/packages/da/a8/67f213c06e5ea3b3d4980df7dc344d7fea88240b5fe878a5dcbdfe0e2315/coverage-7.12.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:3ff651dcd36d2fea66877cd4a82de478004c59b849945446acb5baf9379a1b64", size = 250167, upload-time = "2025-11-18T13:32:17.687Z" }, - { url = "https://files.pythonhosted.org/packages/f0/00/e52aef68154164ea40cc8389c120c314c747fe63a04b013a5782e989b77f/coverage-7.12.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:31b8b2e38391a56e3cea39d22a23faaa7c3fc911751756ef6d2621d2a9daf742", size = 248238, upload-time = "2025-11-18T13:32:19.2Z" }, - { url = "https://files.pythonhosted.org/packages/1f/a4/4d88750bcf9d6d66f77865e5a05a20e14db44074c25fd22519777cb69025/coverage-7.12.0-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:297bc2da28440f5ae51c845a47c8175a4db0553a53827886e4fb25c66633000c", size = 247964, upload-time = "2025-11-18T13:32:21.027Z" }, - { url = "https://files.pythonhosted.org/packages/a7/6b/b74693158899d5b47b0bf6238d2c6722e20ba749f86b74454fac0696bb00/coverage-7.12.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6ff7651cc01a246908eac162a6a86fc0dbab6de1ad165dfb9a1e2ec660b44984", size = 248862, upload-time = "2025-11-18T13:32:22.304Z" }, - { url = "https://files.pythonhosted.org/packages/18/de/6af6730227ce0e8ade307b1cc4a08e7f51b419a78d02083a86c04ccceb29/coverage-7.12.0-cp311-cp311-win32.whl", hash = "sha256:313672140638b6ddb2c6455ddeda41c6a0b208298034544cfca138978c6baed6", size = 220033, upload-time = "2025-11-18T13:32:23.714Z" }, - { url = "https://files.pythonhosted.org/packages/e2/a1/e7f63021a7c4fe20994359fcdeae43cbef4a4d0ca36a5a1639feeea5d9e1/coverage-7.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:a1783ed5bd0d5938d4435014626568dc7f93e3cb99bc59188cc18857c47aa3c4", size = 220966, upload-time = "2025-11-18T13:32:25.599Z" }, - { url = 
"https://files.pythonhosted.org/packages/77/e8/deae26453f37c20c3aa0c4433a1e32cdc169bf415cce223a693117aa3ddd/coverage-7.12.0-cp311-cp311-win_arm64.whl", hash = "sha256:4648158fd8dd9381b5847622df1c90ff314efbfc1df4550092ab6013c238a5fc", size = 219637, upload-time = "2025-11-18T13:32:27.265Z" }, - { url = "https://files.pythonhosted.org/packages/02/bf/638c0427c0f0d47638242e2438127f3c8ee3cfc06c7fdeb16778ed47f836/coverage-7.12.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:29644c928772c78512b48e14156b81255000dcfd4817574ff69def189bcb3647", size = 217704, upload-time = "2025-11-18T13:32:28.906Z" }, - { url = "https://files.pythonhosted.org/packages/08/e1/706fae6692a66c2d6b871a608bbde0da6281903fa0e9f53a39ed441da36a/coverage-7.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8638cbb002eaa5d7c8d04da667813ce1067080b9a91099801a0053086e52b736", size = 218064, upload-time = "2025-11-18T13:32:30.161Z" }, - { url = "https://files.pythonhosted.org/packages/a9/8b/eb0231d0540f8af3ffda39720ff43cb91926489d01524e68f60e961366e4/coverage-7.12.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:083631eeff5eb9992c923e14b810a179798bb598e6a0dd60586819fc23be6e60", size = 249560, upload-time = "2025-11-18T13:32:31.835Z" }, - { url = "https://files.pythonhosted.org/packages/e9/a1/67fb52af642e974d159b5b379e4d4c59d0ebe1288677fbd04bbffe665a82/coverage-7.12.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:99d5415c73ca12d558e07776bd957c4222c687b9f1d26fa0e1b57e3598bdcde8", size = 252318, upload-time = "2025-11-18T13:32:33.178Z" }, - { url = "https://files.pythonhosted.org/packages/41/e5/38228f31b2c7665ebf9bdfdddd7a184d56450755c7e43ac721c11a4b8dab/coverage-7.12.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e949ebf60c717c3df63adb4a1a366c096c8d7fd8472608cd09359e1bd48ef59f", size = 253403, upload-time = "2025-11-18T13:32:34.45Z" }, - { url = 
"https://files.pythonhosted.org/packages/ec/4b/df78e4c8188f9960684267c5a4897836f3f0f20a20c51606ee778a1d9749/coverage-7.12.0-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:6d907ddccbca819afa2cd014bc69983b146cca2735a0b1e6259b2a6c10be1e70", size = 249984, upload-time = "2025-11-18T13:32:35.747Z" }, - { url = "https://files.pythonhosted.org/packages/ba/51/bb163933d195a345c6f63eab9e55743413d064c291b6220df754075c2769/coverage-7.12.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b1518ecbad4e6173f4c6e6c4a46e49555ea5679bf3feda5edb1b935c7c44e8a0", size = 251339, upload-time = "2025-11-18T13:32:37.352Z" }, - { url = "https://files.pythonhosted.org/packages/15/40/c9b29cdb8412c837cdcbc2cfa054547dd83affe6cbbd4ce4fdb92b6ba7d1/coverage-7.12.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:51777647a749abdf6f6fd8c7cffab12de68ab93aab15efc72fbbb83036c2a068", size = 249489, upload-time = "2025-11-18T13:32:39.212Z" }, - { url = "https://files.pythonhosted.org/packages/c8/da/b3131e20ba07a0de4437a50ef3b47840dfabf9293675b0cd5c2c7f66dd61/coverage-7.12.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:42435d46d6461a3b305cdfcad7cdd3248787771f53fe18305548cba474e6523b", size = 249070, upload-time = "2025-11-18T13:32:40.598Z" }, - { url = "https://files.pythonhosted.org/packages/70/81/b653329b5f6302c08d683ceff6785bc60a34be9ae92a5c7b63ee7ee7acec/coverage-7.12.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5bcead88c8423e1855e64b8057d0544e33e4080b95b240c2a355334bb7ced937", size = 250929, upload-time = "2025-11-18T13:32:42.915Z" }, - { url = "https://files.pythonhosted.org/packages/a3/00/250ac3bca9f252a5fb1338b5ad01331ebb7b40223f72bef5b1b2cb03aa64/coverage-7.12.0-cp312-cp312-win32.whl", hash = "sha256:dcbb630ab034e86d2a0f79aefd2be07e583202f41e037602d438c80044957baa", size = 220241, upload-time = "2025-11-18T13:32:44.665Z" }, - { url = 
"https://files.pythonhosted.org/packages/64/1c/77e79e76d37ce83302f6c21980b45e09f8aa4551965213a10e62d71ce0ab/coverage-7.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:2fd8354ed5d69775ac42986a691fbf68b4084278710cee9d7c3eaa0c28fa982a", size = 221051, upload-time = "2025-11-18T13:32:46.008Z" }, - { url = "https://files.pythonhosted.org/packages/31/f5/641b8a25baae564f9e52cac0e2667b123de961985709a004e287ee7663cc/coverage-7.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:737c3814903be30695b2de20d22bcc5428fdae305c61ba44cdc8b3252984c49c", size = 219692, upload-time = "2025-11-18T13:32:47.372Z" }, - { url = "https://files.pythonhosted.org/packages/b8/14/771700b4048774e48d2c54ed0c674273702713c9ee7acdfede40c2666747/coverage-7.12.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:47324fffca8d8eae7e185b5bb20c14645f23350f870c1649003618ea91a78941", size = 217725, upload-time = "2025-11-18T13:32:49.22Z" }, - { url = "https://files.pythonhosted.org/packages/17/a7/3aa4144d3bcb719bf67b22d2d51c2d577bf801498c13cb08f64173e80497/coverage-7.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ccf3b2ede91decd2fb53ec73c1f949c3e034129d1e0b07798ff1d02ea0c8fa4a", size = 218098, upload-time = "2025-11-18T13:32:50.78Z" }, - { url = "https://files.pythonhosted.org/packages/fc/9c/b846bbc774ff81091a12a10203e70562c91ae71badda00c5ae5b613527b1/coverage-7.12.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:b365adc70a6936c6b0582dc38746b33b2454148c02349345412c6e743efb646d", size = 249093, upload-time = "2025-11-18T13:32:52.554Z" }, - { url = "https://files.pythonhosted.org/packages/76/b6/67d7c0e1f400b32c883e9342de4a8c2ae7c1a0b57c5de87622b7262e2309/coverage-7.12.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bc13baf85cd8a4cfcf4a35c7bc9d795837ad809775f782f697bf630b7e200211", size = 251686, upload-time = "2025-11-18T13:32:54.862Z" }, - { url = 
"https://files.pythonhosted.org/packages/cc/75/b095bd4b39d49c3be4bffbb3135fea18a99a431c52dd7513637c0762fecb/coverage-7.12.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:099d11698385d572ceafb3288a5b80fe1fc58bf665b3f9d362389de488361d3d", size = 252930, upload-time = "2025-11-18T13:32:56.417Z" }, - { url = "https://files.pythonhosted.org/packages/6e/f3/466f63015c7c80550bead3093aacabf5380c1220a2a93c35d374cae8f762/coverage-7.12.0-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:473dc45d69694069adb7680c405fb1e81f60b2aff42c81e2f2c3feaf544d878c", size = 249296, upload-time = "2025-11-18T13:32:58.074Z" }, - { url = "https://files.pythonhosted.org/packages/27/86/eba2209bf2b7e28c68698fc13437519a295b2d228ba9e0ec91673e09fa92/coverage-7.12.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:583f9adbefd278e9de33c33d6846aa8f5d164fa49b47144180a0e037f0688bb9", size = 251068, upload-time = "2025-11-18T13:32:59.646Z" }, - { url = "https://files.pythonhosted.org/packages/ec/55/ca8ae7dbba962a3351f18940b359b94c6bafdd7757945fdc79ec9e452dc7/coverage-7.12.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b2089cc445f2dc0af6f801f0d1355c025b76c24481935303cf1af28f636688f0", size = 249034, upload-time = "2025-11-18T13:33:01.481Z" }, - { url = "https://files.pythonhosted.org/packages/7a/d7/39136149325cad92d420b023b5fd900dabdd1c3a0d1d5f148ef4a8cedef5/coverage-7.12.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:950411f1eb5d579999c5f66c62a40961f126fc71e5e14419f004471957b51508", size = 248853, upload-time = "2025-11-18T13:33:02.935Z" }, - { url = "https://files.pythonhosted.org/packages/fe/b6/76e1add8b87ef60e00643b0b7f8f7bb73d4bf5249a3be19ebefc5793dd25/coverage-7.12.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b1aab7302a87bafebfe76b12af681b56ff446dc6f32ed178ff9c092ca776e6bc", size = 250619, upload-time = "2025-11-18T13:33:04.336Z" }, - { url = 
"https://files.pythonhosted.org/packages/95/87/924c6dc64f9203f7a3c1832a6a0eee5a8335dbe5f1bdadcc278d6f1b4d74/coverage-7.12.0-cp313-cp313-win32.whl", hash = "sha256:d7e0d0303c13b54db495eb636bc2465b2fb8475d4c8bcec8fe4b5ca454dfbae8", size = 220261, upload-time = "2025-11-18T13:33:06.493Z" }, - { url = "https://files.pythonhosted.org/packages/91/77/dd4aff9af16ff776bf355a24d87eeb48fc6acde54c907cc1ea89b14a8804/coverage-7.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:ce61969812d6a98a981d147d9ac583a36ac7db7766f2e64a9d4d059c2fe29d07", size = 221072, upload-time = "2025-11-18T13:33:07.926Z" }, - { url = "https://files.pythonhosted.org/packages/70/49/5c9dc46205fef31b1b226a6e16513193715290584317fd4df91cdaf28b22/coverage-7.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:bcec6f47e4cb8a4c2dc91ce507f6eefc6a1b10f58df32cdc61dff65455031dfc", size = 219702, upload-time = "2025-11-18T13:33:09.631Z" }, - { url = "https://files.pythonhosted.org/packages/9b/62/f87922641c7198667994dd472a91e1d9b829c95d6c29529ceb52132436ad/coverage-7.12.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:459443346509476170d553035e4a3eed7b860f4fe5242f02de1010501956ce87", size = 218420, upload-time = "2025-11-18T13:33:11.153Z" }, - { url = "https://files.pythonhosted.org/packages/85/dd/1cc13b2395ef15dbb27d7370a2509b4aee77890a464fb35d72d428f84871/coverage-7.12.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:04a79245ab2b7a61688958f7a855275997134bc84f4a03bc240cf64ff132abf6", size = 218773, upload-time = "2025-11-18T13:33:12.569Z" }, - { url = "https://files.pythonhosted.org/packages/74/40/35773cc4bb1e9d4658d4fb669eb4195b3151bef3bbd6f866aba5cd5dac82/coverage-7.12.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:09a86acaaa8455f13d6a99221d9654df249b33937b4e212b4e5a822065f12aa7", size = 260078, upload-time = "2025-11-18T13:33:14.037Z" }, - { url = 
"https://files.pythonhosted.org/packages/ec/ee/231bb1a6ffc2905e396557585ebc6bdc559e7c66708376d245a1f1d330fc/coverage-7.12.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:907e0df1b71ba77463687a74149c6122c3f6aac56c2510a5d906b2f368208560", size = 262144, upload-time = "2025-11-18T13:33:15.601Z" }, - { url = "https://files.pythonhosted.org/packages/28/be/32f4aa9f3bf0b56f3971001b56508352c7753915345d45fab4296a986f01/coverage-7.12.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9b57e2d0ddd5f0582bae5437c04ee71c46cd908e7bc5d4d0391f9a41e812dd12", size = 264574, upload-time = "2025-11-18T13:33:17.354Z" }, - { url = "https://files.pythonhosted.org/packages/68/7c/00489fcbc2245d13ab12189b977e0cf06ff3351cb98bc6beba8bd68c5902/coverage-7.12.0-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:58c1c6aa677f3a1411fe6fb28ec3a942e4f665df036a3608816e0847fad23296", size = 259298, upload-time = "2025-11-18T13:33:18.958Z" }, - { url = "https://files.pythonhosted.org/packages/96/b4/f0760d65d56c3bea95b449e02570d4abd2549dc784bf39a2d4721a2d8ceb/coverage-7.12.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4c589361263ab2953e3c4cd2a94db94c4ad4a8e572776ecfbad2389c626e4507", size = 262150, upload-time = "2025-11-18T13:33:20.644Z" }, - { url = "https://files.pythonhosted.org/packages/c5/71/9a9314df00f9326d78c1e5a910f520d599205907432d90d1c1b7a97aa4b1/coverage-7.12.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:91b810a163ccad2e43b1faa11d70d3cf4b6f3d83f9fd5f2df82a32d47b648e0d", size = 259763, upload-time = "2025-11-18T13:33:22.189Z" }, - { url = "https://files.pythonhosted.org/packages/10/34/01a0aceed13fbdf925876b9a15d50862eb8845454301fe3cdd1df08b2182/coverage-7.12.0-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:40c867af715f22592e0d0fb533a33a71ec9e0f73a6945f722a0c85c8c1cbe3a2", size = 258653, upload-time = "2025-11-18T13:33:24.239Z" }, - { url 
= "https://files.pythonhosted.org/packages/8d/04/81d8fd64928acf1574bbb0181f66901c6c1c6279c8ccf5f84259d2c68ae9/coverage-7.12.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:68b0d0a2d84f333de875666259dadf28cc67858bc8fd8b3f1eae84d3c2bec455", size = 260856, upload-time = "2025-11-18T13:33:26.365Z" }, - { url = "https://files.pythonhosted.org/packages/f2/76/fa2a37bfaeaf1f766a2d2360a25a5297d4fb567098112f6517475eee120b/coverage-7.12.0-cp313-cp313t-win32.whl", hash = "sha256:73f9e7fbd51a221818fd11b7090eaa835a353ddd59c236c57b2199486b116c6d", size = 220936, upload-time = "2025-11-18T13:33:28.165Z" }, - { url = "https://files.pythonhosted.org/packages/f9/52/60f64d932d555102611c366afb0eb434b34266b1d9266fc2fe18ab641c47/coverage-7.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:24cff9d1f5743f67db7ba46ff284018a6e9aeb649b67aa1e70c396aa1b7cb23c", size = 222001, upload-time = "2025-11-18T13:33:29.656Z" }, - { url = "https://files.pythonhosted.org/packages/77/df/c303164154a5a3aea7472bf323b7c857fed93b26618ed9fc5c2955566bb0/coverage-7.12.0-cp313-cp313t-win_arm64.whl", hash = "sha256:c87395744f5c77c866d0f5a43d97cc39e17c7f1cb0115e54a2fe67ca75c5d14d", size = 220273, upload-time = "2025-11-18T13:33:31.415Z" }, - { url = "https://files.pythonhosted.org/packages/bf/2e/fc12db0883478d6e12bbd62d481210f0c8daf036102aa11434a0c5755825/coverage-7.12.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:a1c59b7dc169809a88b21a936eccf71c3895a78f5592051b1af8f4d59c2b4f92", size = 217777, upload-time = "2025-11-18T13:33:32.86Z" }, - { url = "https://files.pythonhosted.org/packages/1f/c1/ce3e525d223350c6ec16b9be8a057623f54226ef7f4c2fee361ebb6a02b8/coverage-7.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8787b0f982e020adb732b9f051f3e49dd5054cebbc3f3432061278512a2b1360", size = 218100, upload-time = "2025-11-18T13:33:34.532Z" }, - { url = 
"https://files.pythonhosted.org/packages/15/87/113757441504aee3808cb422990ed7c8bcc2d53a6779c66c5adef0942939/coverage-7.12.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5ea5a9f7dc8877455b13dd1effd3202e0bca72f6f3ab09f9036b1bcf728f69ac", size = 249151, upload-time = "2025-11-18T13:33:36.135Z" }, - { url = "https://files.pythonhosted.org/packages/d9/1d/9529d9bd44049b6b05bb319c03a3a7e4b0a8a802d28fa348ad407e10706d/coverage-7.12.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fdba9f15849534594f60b47c9a30bc70409b54947319a7c4fd0e8e3d8d2f355d", size = 251667, upload-time = "2025-11-18T13:33:37.996Z" }, - { url = "https://files.pythonhosted.org/packages/11/bb/567e751c41e9c03dc29d3ce74b8c89a1e3396313e34f255a2a2e8b9ebb56/coverage-7.12.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a00594770eb715854fb1c57e0dea08cce6720cfbc531accdb9850d7c7770396c", size = 253003, upload-time = "2025-11-18T13:33:39.553Z" }, - { url = "https://files.pythonhosted.org/packages/e4/b3/c2cce2d8526a02fb9e9ca14a263ca6fc074449b33a6afa4892838c903528/coverage-7.12.0-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:5560c7e0d82b42eb1951e4f68f071f8017c824ebfd5a6ebe42c60ac16c6c2434", size = 249185, upload-time = "2025-11-18T13:33:42.086Z" }, - { url = "https://files.pythonhosted.org/packages/0e/a7/967f93bb66e82c9113c66a8d0b65ecf72fc865adfba5a145f50c7af7e58d/coverage-7.12.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d6c2e26b481c9159c2773a37947a9718cfdc58893029cdfb177531793e375cfc", size = 251025, upload-time = "2025-11-18T13:33:43.634Z" }, - { url = "https://files.pythonhosted.org/packages/b9/b2/f2f6f56337bc1af465d5b2dc1ee7ee2141b8b9272f3bf6213fcbc309a836/coverage-7.12.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:6e1a8c066dabcde56d5d9fed6a66bc19a2883a3fe051f0c397a41fc42aedd4cc", size = 248979, upload-time = 
"2025-11-18T13:33:46.04Z" }, - { url = "https://files.pythonhosted.org/packages/f4/7a/bf4209f45a4aec09d10a01a57313a46c0e0e8f4c55ff2965467d41a92036/coverage-7.12.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:f7ba9da4726e446d8dd8aae5a6cd872511184a5d861de80a86ef970b5dacce3e", size = 248800, upload-time = "2025-11-18T13:33:47.546Z" }, - { url = "https://files.pythonhosted.org/packages/b8/b7/1e01b8696fb0521810f60c5bbebf699100d6754183e6cc0679bf2ed76531/coverage-7.12.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e0f483ab4f749039894abaf80c2f9e7ed77bbf3c737517fb88c8e8e305896a17", size = 250460, upload-time = "2025-11-18T13:33:49.537Z" }, - { url = "https://files.pythonhosted.org/packages/71/ae/84324fb9cb46c024760e706353d9b771a81b398d117d8c1fe010391c186f/coverage-7.12.0-cp314-cp314-win32.whl", hash = "sha256:76336c19a9ef4a94b2f8dc79f8ac2da3f193f625bb5d6f51a328cd19bfc19933", size = 220533, upload-time = "2025-11-18T13:33:51.16Z" }, - { url = "https://files.pythonhosted.org/packages/e2/71/1033629deb8460a8f97f83e6ac4ca3b93952e2b6f826056684df8275e015/coverage-7.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:7c1059b600aec6ef090721f8f633f60ed70afaffe8ecab85b59df748f24b31fe", size = 221348, upload-time = "2025-11-18T13:33:52.776Z" }, - { url = "https://files.pythonhosted.org/packages/0a/5f/ac8107a902f623b0c251abdb749be282dc2ab61854a8a4fcf49e276fce2f/coverage-7.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:172cf3a34bfef42611963e2b661302a8931f44df31629e5b1050567d6b90287d", size = 219922, upload-time = "2025-11-18T13:33:54.316Z" }, - { url = "https://files.pythonhosted.org/packages/79/6e/f27af2d4da367f16077d21ef6fe796c874408219fa6dd3f3efe7751bd910/coverage-7.12.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:aa7d48520a32cb21c7a9b31f81799e8eaec7239db36c3b670be0fa2403828d1d", size = 218511, upload-time = "2025-11-18T13:33:56.343Z" }, - { url = 
"https://files.pythonhosted.org/packages/67/dd/65fd874aa460c30da78f9d259400d8e6a4ef457d61ab052fd248f0050558/coverage-7.12.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:90d58ac63bc85e0fb919f14d09d6caa63f35a5512a2205284b7816cafd21bb03", size = 218771, upload-time = "2025-11-18T13:33:57.966Z" }, - { url = "https://files.pythonhosted.org/packages/55/e0/7c6b71d327d8068cb79c05f8f45bf1b6145f7a0de23bbebe63578fe5240a/coverage-7.12.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ca8ecfa283764fdda3eae1bdb6afe58bf78c2c3ec2b2edcb05a671f0bba7b3f9", size = 260151, upload-time = "2025-11-18T13:33:59.597Z" }, - { url = "https://files.pythonhosted.org/packages/49/ce/4697457d58285b7200de6b46d606ea71066c6e674571a946a6ea908fb588/coverage-7.12.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:874fe69a0785d96bd066059cd4368022cebbec1a8958f224f0016979183916e6", size = 262257, upload-time = "2025-11-18T13:34:01.166Z" }, - { url = "https://files.pythonhosted.org/packages/2f/33/acbc6e447aee4ceba88c15528dbe04a35fb4d67b59d393d2e0d6f1e242c1/coverage-7.12.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5b3c889c0b8b283a24d721a9eabc8ccafcfc3aebf167e4cd0d0e23bf8ec4e339", size = 264671, upload-time = "2025-11-18T13:34:02.795Z" }, - { url = "https://files.pythonhosted.org/packages/87/ec/e2822a795c1ed44d569980097be839c5e734d4c0c1119ef8e0a073496a30/coverage-7.12.0-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8bb5b894b3ec09dcd6d3743229dc7f2c42ef7787dc40596ae04c0edda487371e", size = 259231, upload-time = "2025-11-18T13:34:04.397Z" }, - { url = "https://files.pythonhosted.org/packages/72/c5/a7ec5395bb4a49c9b7ad97e63f0c92f6bf4a9e006b1393555a02dae75f16/coverage-7.12.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:79a44421cd5fba96aa57b5e3b5a4d3274c449d4c622e8f76882d76635501fd13", size = 262137, upload-time = 
"2025-11-18T13:34:06.068Z" }, - { url = "https://files.pythonhosted.org/packages/67/0c/02c08858b764129f4ecb8e316684272972e60777ae986f3865b10940bdd6/coverage-7.12.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:33baadc0efd5c7294f436a632566ccc1f72c867f82833eb59820ee37dc811c6f", size = 259745, upload-time = "2025-11-18T13:34:08.04Z" }, - { url = "https://files.pythonhosted.org/packages/5a/04/4fd32b7084505f3829a8fe45c1a74a7a728cb251aaadbe3bec04abcef06d/coverage-7.12.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:c406a71f544800ef7e9e0000af706b88465f3573ae8b8de37e5f96c59f689ad1", size = 258570, upload-time = "2025-11-18T13:34:09.676Z" }, - { url = "https://files.pythonhosted.org/packages/48/35/2365e37c90df4f5342c4fa202223744119fe31264ee2924f09f074ea9b6d/coverage-7.12.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e71bba6a40883b00c6d571599b4627f50c360b3d0d02bfc658168936be74027b", size = 260899, upload-time = "2025-11-18T13:34:11.259Z" }, - { url = "https://files.pythonhosted.org/packages/05/56/26ab0464ca733fa325e8e71455c58c1c374ce30f7c04cebb88eabb037b18/coverage-7.12.0-cp314-cp314t-win32.whl", hash = "sha256:9157a5e233c40ce6613dead4c131a006adfda70e557b6856b97aceed01b0e27a", size = 221313, upload-time = "2025-11-18T13:34:12.863Z" }, - { url = "https://files.pythonhosted.org/packages/da/1c/017a3e1113ed34d998b27d2c6dba08a9e7cb97d362f0ec988fcd873dcf81/coverage-7.12.0-cp314-cp314t-win_amd64.whl", hash = "sha256:e84da3a0fd233aeec797b981c51af1cabac74f9bd67be42458365b30d11b5291", size = 222423, upload-time = "2025-11-18T13:34:15.14Z" }, - { url = "https://files.pythonhosted.org/packages/4c/36/bcc504fdd5169301b52568802bb1b9cdde2e27a01d39fbb3b4b508ab7c2c/coverage-7.12.0-cp314-cp314t-win_arm64.whl", hash = "sha256:01d24af36fedda51c2b1aca56e4330a3710f83b02a5ff3743a6b015ffa7c9384", size = 220459, upload-time = "2025-11-18T13:34:17.222Z" }, - { url = 
"https://files.pythonhosted.org/packages/ce/a3/43b749004e3c09452e39bb56347a008f0a0668aad37324a99b5c8ca91d9e/coverage-7.12.0-py3-none-any.whl", hash = "sha256:159d50c0b12e060b15ed3d39f87ed43d4f7f7ad40b8a534f4dd331adbb51104a", size = 209503, upload-time = "2025-11-18T13:34:18.892Z" }, +version = "7.13.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ad/49/349848445b0e53660e258acbcc9b0d014895b6739237920886672240f84b/coverage-7.13.2.tar.gz", hash = "sha256:044c6951ec37146b72a50cc81ef02217d27d4c3640efd2640311393cbbf143d3", size = 826523, upload-time = "2026-01-25T13:00:04.889Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6c/01/abca50583a8975bb6e1c59eff67ed8e48bb127c07dad5c28d9e96ccc09ec/coverage-7.13.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:060ebf6f2c51aff5ba38e1f43a2095e087389b1c69d559fde6049a4b0001320e", size = 218971, upload-time = "2026-01-25T12:57:36.953Z" }, + { url = "https://files.pythonhosted.org/packages/eb/0e/b6489f344d99cd1e5b4d5e1be52dfd3f8a3dc5112aa6c33948da8cabad4e/coverage-7.13.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1ea8ca9db5e7469cd364552985e15911548ea5b69c48a17291f0cac70484b2e", size = 219473, upload-time = "2026-01-25T12:57:38.934Z" }, + { url = "https://files.pythonhosted.org/packages/17/11/db2f414915a8e4ec53f60b17956c27f21fb68fcf20f8a455ce7c2ccec638/coverage-7.13.2-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:b780090d15fd58f07cf2011943e25a5f0c1c894384b13a216b6c86c8a8a7c508", size = 249896, upload-time = "2026-01-25T12:57:40.365Z" }, + { url = "https://files.pythonhosted.org/packages/80/06/0823fe93913663c017e508e8810c998c8ebd3ec2a5a85d2c3754297bdede/coverage-7.13.2-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:88a800258d83acb803c38175b4495d293656d5fac48659c953c18e5f539a274b", size = 251810, upload-time = "2026-01-25T12:57:42.045Z" }, + { url = 
"https://files.pythonhosted.org/packages/61/dc/b151c3cc41b28cdf7f0166c5fa1271cbc305a8ec0124cce4b04f74791a18/coverage-7.13.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6326e18e9a553e674d948536a04a80d850a5eeefe2aae2e6d7cf05d54046c01b", size = 253920, upload-time = "2026-01-25T12:57:44.026Z" }, + { url = "https://files.pythonhosted.org/packages/2d/35/e83de0556e54a4729a2b94ea816f74ce08732e81945024adee46851c2264/coverage-7.13.2-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:59562de3f797979e1ff07c587e2ac36ba60ca59d16c211eceaa579c266c5022f", size = 250025, upload-time = "2026-01-25T12:57:45.624Z" }, + { url = "https://files.pythonhosted.org/packages/39/67/af2eb9c3926ce3ea0d58a0d2516fcbdacf7a9fc9559fe63076beaf3f2596/coverage-7.13.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:27ba1ed6f66b0e2d61bfa78874dffd4f8c3a12f8e2b5410e515ab345ba7bc9c3", size = 251612, upload-time = "2026-01-25T12:57:47.713Z" }, + { url = "https://files.pythonhosted.org/packages/26/62/5be2e25f3d6c711d23b71296f8b44c978d4c8b4e5b26871abfc164297502/coverage-7.13.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8be48da4d47cc68754ce643ea50b3234557cbefe47c2f120495e7bd0a2756f2b", size = 249670, upload-time = "2026-01-25T12:57:49.378Z" }, + { url = "https://files.pythonhosted.org/packages/b3/51/400d1b09a8344199f9b6a6fc1868005d766b7ea95e7882e494fa862ca69c/coverage-7.13.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2a47a4223d3361b91176aedd9d4e05844ca67d7188456227b6bf5e436630c9a1", size = 249395, upload-time = "2026-01-25T12:57:50.86Z" }, + { url = "https://files.pythonhosted.org/packages/e0/36/f02234bc6e5230e2f0a63fd125d0a2093c73ef20fdf681c7af62a140e4e7/coverage-7.13.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c6f141b468740197d6bd38f2b26ade124363228cc3f9858bd9924ab059e00059", size = 250298, upload-time = "2026-01-25T12:57:52.287Z" }, + { url = 
"https://files.pythonhosted.org/packages/b0/06/713110d3dd3151b93611c9cbfc65c15b4156b44f927fced49ac0b20b32a4/coverage-7.13.2-cp311-cp311-win32.whl", hash = "sha256:89567798404af067604246e01a49ef907d112edf2b75ef814b1364d5ce267031", size = 221485, upload-time = "2026-01-25T12:57:53.876Z" }, + { url = "https://files.pythonhosted.org/packages/16/0c/3ae6255fa1ebcb7dec19c9a59e85ef5f34566d1265c70af5b2fc981da834/coverage-7.13.2-cp311-cp311-win_amd64.whl", hash = "sha256:21dd57941804ae2ac7e921771a5e21bbf9aabec317a041d164853ad0a96ce31e", size = 222421, upload-time = "2026-01-25T12:57:55.433Z" }, + { url = "https://files.pythonhosted.org/packages/b5/37/fabc3179af4d61d89ea47bd04333fec735cd5e8b59baad44fed9fc4170d7/coverage-7.13.2-cp311-cp311-win_arm64.whl", hash = "sha256:10758e0586c134a0bafa28f2d37dd2cdb5e4a90de25c0fc0c77dabbad46eca28", size = 221088, upload-time = "2026-01-25T12:57:57.41Z" }, + { url = "https://files.pythonhosted.org/packages/46/39/e92a35f7800222d3f7b2cbb7bbc3b65672ae8d501cb31801b2d2bd7acdf1/coverage-7.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f106b2af193f965d0d3234f3f83fc35278c7fb935dfbde56ae2da3dd2c03b84d", size = 219142, upload-time = "2026-01-25T12:58:00.448Z" }, + { url = "https://files.pythonhosted.org/packages/45/7a/8bf9e9309c4c996e65c52a7c5a112707ecdd9fbaf49e10b5a705a402bbb4/coverage-7.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78f45d21dc4d5d6bd29323f0320089ef7eae16e4bef712dff79d184fa7330af3", size = 219503, upload-time = "2026-01-25T12:58:02.451Z" }, + { url = "https://files.pythonhosted.org/packages/87/93/17661e06b7b37580923f3f12406ac91d78aeed293fb6da0b69cc7957582f/coverage-7.13.2-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:fae91dfecd816444c74531a9c3d6ded17a504767e97aa674d44f638107265b99", size = 251006, upload-time = "2026-01-25T12:58:04.059Z" }, + { url = 
"https://files.pythonhosted.org/packages/12/f0/f9e59fb8c310171497f379e25db060abef9fa605e09d63157eebec102676/coverage-7.13.2-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:264657171406c114787b441484de620e03d8f7202f113d62fcd3d9688baa3e6f", size = 253750, upload-time = "2026-01-25T12:58:05.574Z" }, + { url = "https://files.pythonhosted.org/packages/e5/b1/1935e31add2232663cf7edd8269548b122a7d100047ff93475dbaaae673e/coverage-7.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ae47d8dcd3ded0155afbb59c62bd8ab07ea0fd4902e1c40567439e6db9dcaf2f", size = 254862, upload-time = "2026-01-25T12:58:07.647Z" }, + { url = "https://files.pythonhosted.org/packages/af/59/b5e97071ec13df5f45da2b3391b6cdbec78ba20757bc92580a5b3d5fa53c/coverage-7.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8a0b33e9fd838220b007ce8f299114d406c1e8edb21336af4c97a26ecfd185aa", size = 251420, upload-time = "2026-01-25T12:58:09.309Z" }, + { url = "https://files.pythonhosted.org/packages/3f/75/9495932f87469d013dc515fb0ce1aac5fa97766f38f6b1a1deb1ee7b7f3a/coverage-7.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b3becbea7f3ce9a2d4d430f223ec15888e4deb31395840a79e916368d6004cce", size = 252786, upload-time = "2026-01-25T12:58:10.909Z" }, + { url = "https://files.pythonhosted.org/packages/6a/59/af550721f0eb62f46f7b8cb7e6f1860592189267b1c411a4e3a057caacee/coverage-7.13.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f819c727a6e6eeb8711e4ce63d78c620f69630a2e9d53bc95ca5379f57b6ba94", size = 250928, upload-time = "2026-01-25T12:58:12.449Z" }, + { url = "https://files.pythonhosted.org/packages/9b/b1/21b4445709aae500be4ab43bbcfb4e53dc0811c3396dcb11bf9f23fd0226/coverage-7.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:4f7b71757a3ab19f7ba286e04c181004c1d61be921795ee8ba6970fd0ec91da5", size = 250496, upload-time = "2026-01-25T12:58:14.047Z" }, + { url = 
"https://files.pythonhosted.org/packages/ba/b1/0f5d89dfe0392990e4f3980adbde3eb34885bc1effb2dc369e0bf385e389/coverage-7.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b7fc50d2afd2e6b4f6f2f403b70103d280a8e0cb35320cbbe6debcda02a1030b", size = 252373, upload-time = "2026-01-25T12:58:15.976Z" }, + { url = "https://files.pythonhosted.org/packages/01/c9/0cf1a6a57a9968cc049a6b896693faa523c638a5314b1fc374eb2b2ac904/coverage-7.13.2-cp312-cp312-win32.whl", hash = "sha256:292250282cf9bcf206b543d7608bda17ca6fc151f4cbae949fc7e115112fbd41", size = 221696, upload-time = "2026-01-25T12:58:17.517Z" }, + { url = "https://files.pythonhosted.org/packages/4d/05/d7540bf983f09d32803911afed135524570f8c47bb394bf6206c1dc3a786/coverage-7.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:eeea10169fac01549a7921d27a3e517194ae254b542102267bef7a93ed38c40e", size = 222504, upload-time = "2026-01-25T12:58:19.115Z" }, + { url = "https://files.pythonhosted.org/packages/15/8b/1a9f037a736ced0a12aacf6330cdaad5008081142a7070bc58b0f7930cbc/coverage-7.13.2-cp312-cp312-win_arm64.whl", hash = "sha256:2a5b567f0b635b592c917f96b9a9cb3dbd4c320d03f4bf94e9084e494f2e8894", size = 221120, upload-time = "2026-01-25T12:58:21.334Z" }, + { url = "https://files.pythonhosted.org/packages/a7/f0/3d3eac7568ab6096ff23791a526b0048a1ff3f49d0e236b2af6fb6558e88/coverage-7.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ed75de7d1217cf3b99365d110975f83af0528c849ef5180a12fd91b5064df9d6", size = 219168, upload-time = "2026-01-25T12:58:23.376Z" }, + { url = "https://files.pythonhosted.org/packages/a3/a6/f8b5cfeddbab95fdef4dcd682d82e5dcff7a112ced57a959f89537ee9995/coverage-7.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:97e596de8fa9bada4d88fde64a3f4d37f1b6131e4faa32bad7808abc79887ddc", size = 219537, upload-time = "2026-01-25T12:58:24.932Z" }, + { url = 
"https://files.pythonhosted.org/packages/7b/e6/8d8e6e0c516c838229d1e41cadcec91745f4b1031d4db17ce0043a0423b4/coverage-7.13.2-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:68c86173562ed4413345410c9480a8d64864ac5e54a5cda236748031e094229f", size = 250528, upload-time = "2026-01-25T12:58:26.567Z" }, + { url = "https://files.pythonhosted.org/packages/8e/78/befa6640f74092b86961f957f26504c8fba3d7da57cc2ab7407391870495/coverage-7.13.2-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7be4d613638d678b2b3773b8f687537b284d7074695a43fe2fbbfc0e31ceaed1", size = 253132, upload-time = "2026-01-25T12:58:28.251Z" }, + { url = "https://files.pythonhosted.org/packages/9d/10/1630db1edd8ce675124a2ee0f7becc603d2bb7b345c2387b4b95c6907094/coverage-7.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d7f63ce526a96acd0e16c4af8b50b64334239550402fb1607ce6a584a6d62ce9", size = 254374, upload-time = "2026-01-25T12:58:30.294Z" }, + { url = "https://files.pythonhosted.org/packages/ed/1d/0d9381647b1e8e6d310ac4140be9c428a0277330991e0c35bdd751e338a4/coverage-7.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:406821f37f864f968e29ac14c3fccae0fec9fdeba48327f0341decf4daf92d7c", size = 250762, upload-time = "2026-01-25T12:58:32.036Z" }, + { url = "https://files.pythonhosted.org/packages/43/e4/5636dfc9a7c871ee8776af83ee33b4c26bc508ad6cee1e89b6419a366582/coverage-7.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ee68e5a4e3e5443623406b905db447dceddffee0dceb39f4e0cd9ec2a35004b5", size = 252502, upload-time = "2026-01-25T12:58:33.961Z" }, + { url = "https://files.pythonhosted.org/packages/02/2a/7ff2884d79d420cbb2d12fed6fff727b6d0ef27253140d3cdbbd03187ee0/coverage-7.13.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2ee0e58cca0c17dd9c6c1cdde02bb705c7b3fbfa5f3b0b5afeda20d4ebff8ef4", size = 250463, upload-time = 
"2026-01-25T12:58:35.529Z" }, + { url = "https://files.pythonhosted.org/packages/91/c0/ba51087db645b6c7261570400fc62c89a16278763f36ba618dc8657a187b/coverage-7.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:6e5bbb5018bf76a56aabdb64246b5288d5ae1b7d0dd4d0534fe86df2c2992d1c", size = 250288, upload-time = "2026-01-25T12:58:37.226Z" }, + { url = "https://files.pythonhosted.org/packages/03/07/44e6f428551c4d9faf63ebcefe49b30e5c89d1be96f6a3abd86a52da9d15/coverage-7.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a55516c68ef3e08e134e818d5e308ffa6b1337cc8b092b69b24287bf07d38e31", size = 252063, upload-time = "2026-01-25T12:58:38.821Z" }, + { url = "https://files.pythonhosted.org/packages/c2/67/35b730ad7e1859dd57e834d1bc06080d22d2f87457d53f692fce3f24a5a9/coverage-7.13.2-cp313-cp313-win32.whl", hash = "sha256:5b20211c47a8abf4abc3319d8ce2464864fa9f30c5fcaf958a3eed92f4f1fef8", size = 221716, upload-time = "2026-01-25T12:58:40.484Z" }, + { url = "https://files.pythonhosted.org/packages/0d/82/e5fcf5a97c72f45fc14829237a6550bf49d0ab882ac90e04b12a69db76b4/coverage-7.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:14f500232e521201cf031549fb1ebdfc0a40f401cf519157f76c397e586c3beb", size = 222522, upload-time = "2026-01-25T12:58:43.247Z" }, + { url = "https://files.pythonhosted.org/packages/b1/f1/25d7b2f946d239dd2d6644ca2cc060d24f97551e2af13b6c24c722ae5f97/coverage-7.13.2-cp313-cp313-win_arm64.whl", hash = "sha256:9779310cb5a9778a60c899f075a8514c89fa6d10131445c2207fc893e0b14557", size = 221145, upload-time = "2026-01-25T12:58:45Z" }, + { url = "https://files.pythonhosted.org/packages/9e/f7/080376c029c8f76fadfe43911d0daffa0cbdc9f9418a0eead70c56fb7f4b/coverage-7.13.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:e64fa5a1e41ce5df6b547cbc3d3699381c9e2c2c369c67837e716ed0f549d48e", size = 219861, upload-time = "2026-01-25T12:58:46.586Z" }, + { url = 
"https://files.pythonhosted.org/packages/42/11/0b5e315af5ab35f4c4a70e64d3314e4eec25eefc6dec13be3a7d5ffe8ac5/coverage-7.13.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b01899e82a04085b6561eb233fd688474f57455e8ad35cd82286463ba06332b7", size = 220207, upload-time = "2026-01-25T12:58:48.277Z" }, + { url = "https://files.pythonhosted.org/packages/b2/0c/0874d0318fb1062117acbef06a09cf8b63f3060c22265adaad24b36306b7/coverage-7.13.2-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:838943bea48be0e2768b0cf7819544cdedc1bbb2f28427eabb6eb8c9eb2285d3", size = 261504, upload-time = "2026-01-25T12:58:49.904Z" }, + { url = "https://files.pythonhosted.org/packages/83/5e/1cd72c22ecb30751e43a72f40ba50fcef1b7e93e3ea823bd9feda8e51f9a/coverage-7.13.2-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:93d1d25ec2b27e90bcfef7012992d1f5121b51161b8bffcda756a816cf13c2c3", size = 263582, upload-time = "2026-01-25T12:58:51.582Z" }, + { url = "https://files.pythonhosted.org/packages/9b/da/8acf356707c7a42df4d0657020308e23e5a07397e81492640c186268497c/coverage-7.13.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93b57142f9621b0d12349c43fc7741fe578e4bc914c1e5a54142856cfc0bf421", size = 266008, upload-time = "2026-01-25T12:58:53.234Z" }, + { url = "https://files.pythonhosted.org/packages/41/41/ea1730af99960309423c6ea8d6a4f1fa5564b2d97bd1d29dda4b42611f04/coverage-7.13.2-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f06799ae1bdfff7ccb8665d75f8291c69110ba9585253de254688aa8a1ccc6c5", size = 260762, upload-time = "2026-01-25T12:58:55.372Z" }, + { url = "https://files.pythonhosted.org/packages/22/fa/02884d2080ba71db64fdc127b311db60e01fe6ba797d9c8363725e39f4d5/coverage-7.13.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:7f9405ab4f81d490811b1d91c7a20361135a2df4c170e7f0b747a794da5b7f23", size = 263571, upload-time = 
"2026-01-25T12:58:57.52Z" }, + { url = "https://files.pythonhosted.org/packages/d2/6b/4083aaaeba9b3112f55ac57c2ce7001dc4d8fa3fcc228a39f09cc84ede27/coverage-7.13.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:f9ab1d5b86f8fbc97a5b3cd6280a3fd85fef3b028689d8a2c00918f0d82c728c", size = 261200, upload-time = "2026-01-25T12:58:59.255Z" }, + { url = "https://files.pythonhosted.org/packages/e9/d2/aea92fa36d61955e8c416ede9cf9bf142aa196f3aea214bb67f85235a050/coverage-7.13.2-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:f674f59712d67e841525b99e5e2b595250e39b529c3bda14764e4f625a3fa01f", size = 260095, upload-time = "2026-01-25T12:59:01.066Z" }, + { url = "https://files.pythonhosted.org/packages/0d/ae/04ffe96a80f107ea21b22b2367175c621da920063260a1c22f9452fd7866/coverage-7.13.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c6cadac7b8ace1ba9144feb1ae3cb787a6065ba6d23ffc59a934b16406c26573", size = 262284, upload-time = "2026-01-25T12:59:02.802Z" }, + { url = "https://files.pythonhosted.org/packages/1c/7a/6f354dcd7dfc41297791d6fb4e0d618acb55810bde2c1fd14b3939e05c2b/coverage-7.13.2-cp313-cp313t-win32.whl", hash = "sha256:14ae4146465f8e6e6253eba0cccd57423e598a4cb925958b240c805300918343", size = 222389, upload-time = "2026-01-25T12:59:04.563Z" }, + { url = "https://files.pythonhosted.org/packages/8d/d5/080ad292a4a3d3daf411574be0a1f56d6dee2c4fdf6b005342be9fac807f/coverage-7.13.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9074896edd705a05769e3de0eac0a8388484b503b68863dd06d5e473f874fd47", size = 223450, upload-time = "2026-01-25T12:59:06.677Z" }, + { url = "https://files.pythonhosted.org/packages/88/96/df576fbacc522e9fb8d1c4b7a7fc62eb734be56e2cba1d88d2eabe08ea3f/coverage-7.13.2-cp313-cp313t-win_arm64.whl", hash = "sha256:69e526e14f3f854eda573d3cf40cffd29a1a91c684743d904c33dbdcd0e0f3e7", size = 221707, upload-time = "2026-01-25T12:59:08.363Z" }, + { url = 
"https://files.pythonhosted.org/packages/55/53/1da9e51a0775634b04fcc11eb25c002fc58ee4f92ce2e8512f94ac5fc5bf/coverage-7.13.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:387a825f43d680e7310e6f325b2167dd093bc8ffd933b83e9aa0983cf6e0a2ef", size = 219213, upload-time = "2026-01-25T12:59:11.909Z" }, + { url = "https://files.pythonhosted.org/packages/46/35/b3caac3ebbd10230fea5a33012b27d19e999a17c9285c4228b4b2e35b7da/coverage-7.13.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f0d7fea9d8e5d778cd5a9e8fc38308ad688f02040e883cdc13311ef2748cb40f", size = 219549, upload-time = "2026-01-25T12:59:13.638Z" }, + { url = "https://files.pythonhosted.org/packages/76/9c/e1cf7def1bdc72c1907e60703983a588f9558434a2ff94615747bd73c192/coverage-7.13.2-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:e080afb413be106c95c4ee96b4fffdc9e2fa56a8bbf90b5c0918e5c4449412f5", size = 250586, upload-time = "2026-01-25T12:59:15.808Z" }, + { url = "https://files.pythonhosted.org/packages/ba/49/f54ec02ed12be66c8d8897270505759e057b0c68564a65c429ccdd1f139e/coverage-7.13.2-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a7fc042ba3c7ce25b8a9f097eb0f32a5ce1ccdb639d9eec114e26def98e1f8a4", size = 253093, upload-time = "2026-01-25T12:59:17.491Z" }, + { url = "https://files.pythonhosted.org/packages/fb/5e/aaf86be3e181d907e23c0f61fccaeb38de8e6f6b47aed92bf57d8fc9c034/coverage-7.13.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d0ba505e021557f7f8173ee8cd6b926373d8653e5ff7581ae2efce1b11ef4c27", size = 254446, upload-time = "2026-01-25T12:59:19.752Z" }, + { url = "https://files.pythonhosted.org/packages/28/c8/a5fa01460e2d75b0c853b392080d6829d3ca8b5ab31e158fa0501bc7c708/coverage-7.13.2-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7de326f80e3451bd5cc7239ab46c73ddb658fe0b7649476bc7413572d36cd548", size = 250615, upload-time = 
"2026-01-25T12:59:21.928Z" }, + { url = "https://files.pythonhosted.org/packages/86/0b/6d56315a55f7062bb66410732c24879ccb2ec527ab6630246de5fe45a1df/coverage-7.13.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:abaea04f1e7e34841d4a7b343904a3f59481f62f9df39e2cd399d69a187a9660", size = 252452, upload-time = "2026-01-25T12:59:23.592Z" }, + { url = "https://files.pythonhosted.org/packages/30/19/9bc550363ebc6b0ea121977ee44d05ecd1e8bf79018b8444f1028701c563/coverage-7.13.2-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:9f93959ee0c604bccd8e0697be21de0887b1f73efcc3aa73a3ec0fd13feace92", size = 250418, upload-time = "2026-01-25T12:59:25.392Z" }, + { url = "https://files.pythonhosted.org/packages/1f/53/580530a31ca2f0cc6f07a8f2ab5460785b02bb11bdf815d4c4d37a4c5169/coverage-7.13.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:13fe81ead04e34e105bf1b3c9f9cdf32ce31736ee5d90a8d2de02b9d3e1bcb82", size = 250231, upload-time = "2026-01-25T12:59:27.888Z" }, + { url = "https://files.pythonhosted.org/packages/e2/42/dd9093f919dc3088cb472893651884bd675e3df3d38a43f9053656dca9a2/coverage-7.13.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d6d16b0f71120e365741bca2cb473ca6fe38930bc5431c5e850ba949f708f892", size = 251888, upload-time = "2026-01-25T12:59:29.636Z" }, + { url = "https://files.pythonhosted.org/packages/fa/a6/0af4053e6e819774626e133c3d6f70fae4d44884bfc4b126cb647baee8d3/coverage-7.13.2-cp314-cp314-win32.whl", hash = "sha256:9b2f4714bb7d99ba3790ee095b3b4ac94767e1347fe424278a0b10acb3ff04fe", size = 221968, upload-time = "2026-01-25T12:59:31.424Z" }, + { url = "https://files.pythonhosted.org/packages/c4/cc/5aff1e1f80d55862442855517bb8ad8ad3a68639441ff6287dde6a58558b/coverage-7.13.2-cp314-cp314-win_amd64.whl", hash = "sha256:e4121a90823a063d717a96e0a0529c727fb31ea889369a0ee3ec00ed99bf6859", size = 222783, upload-time = "2026-01-25T12:59:33.118Z" }, + { url = 
"https://files.pythonhosted.org/packages/de/20/09abafb24f84b3292cc658728803416c15b79f9ee5e68d25238a895b07d9/coverage-7.13.2-cp314-cp314-win_arm64.whl", hash = "sha256:6873f0271b4a15a33e7590f338d823f6f66f91ed147a03938d7ce26efd04eee6", size = 221348, upload-time = "2026-01-25T12:59:34.939Z" }, + { url = "https://files.pythonhosted.org/packages/b6/60/a3820c7232db63be060e4019017cd3426751c2699dab3c62819cdbcea387/coverage-7.13.2-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:f61d349f5b7cd95c34017f1927ee379bfbe9884300d74e07cf630ccf7a610c1b", size = 219950, upload-time = "2026-01-25T12:59:36.624Z" }, + { url = "https://files.pythonhosted.org/packages/fd/37/e4ef5975fdeb86b1e56db9a82f41b032e3d93a840ebaf4064f39e770d5c5/coverage-7.13.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a43d34ce714f4ca674c0d90beb760eb05aad906f2c47580ccee9da8fe8bfb417", size = 220209, upload-time = "2026-01-25T12:59:38.339Z" }, + { url = "https://files.pythonhosted.org/packages/54/df/d40e091d00c51adca1e251d3b60a8b464112efa3004949e96a74d7c19a64/coverage-7.13.2-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:bff1b04cb9d4900ce5c56c4942f047dc7efe57e2608cb7c3c8936e9970ccdbee", size = 261576, upload-time = "2026-01-25T12:59:40.446Z" }, + { url = "https://files.pythonhosted.org/packages/c5/44/5259c4bed54e3392e5c176121af9f71919d96dde853386e7730e705f3520/coverage-7.13.2-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6ae99e4560963ad8e163e819e5d77d413d331fd00566c1e0856aa252303552c1", size = 263704, upload-time = "2026-01-25T12:59:42.346Z" }, + { url = "https://files.pythonhosted.org/packages/16/bd/ae9f005827abcbe2c70157459ae86053971c9fa14617b63903abbdce26d9/coverage-7.13.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e79a8c7d461820257d9aa43716c4efc55366d7b292e46b5b37165be1d377405d", size = 266109, upload-time = "2026-01-25T12:59:44.073Z" }, + { url = 
"https://files.pythonhosted.org/packages/a2/c0/8e279c1c0f5b1eaa3ad9b0fb7a5637fc0379ea7d85a781c0fe0bb3cfc2ab/coverage-7.13.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:060ee84f6a769d40c492711911a76811b4befb6fba50abb450371abb720f5bd6", size = 260686, upload-time = "2026-01-25T12:59:45.804Z" }, + { url = "https://files.pythonhosted.org/packages/b2/47/3a8112627e9d863e7cddd72894171c929e94491a597811725befdcd76bce/coverage-7.13.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:3bca209d001fd03ea2d978f8a4985093240a355c93078aee3f799852c23f561a", size = 263568, upload-time = "2026-01-25T12:59:47.929Z" }, + { url = "https://files.pythonhosted.org/packages/92/bc/7ea367d84afa3120afc3ce6de294fd2dcd33b51e2e7fbe4bbfd200f2cb8c/coverage-7.13.2-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:6b8092aa38d72f091db61ef83cb66076f18f02da3e1a75039a4f218629600e04", size = 261174, upload-time = "2026-01-25T12:59:49.717Z" }, + { url = "https://files.pythonhosted.org/packages/33/b7/f1092dcecb6637e31cc2db099581ee5c61a17647849bae6b8261a2b78430/coverage-7.13.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:4a3158dc2dcce5200d91ec28cd315c999eebff355437d2765840555d765a6e5f", size = 260017, upload-time = "2026-01-25T12:59:51.463Z" }, + { url = "https://files.pythonhosted.org/packages/2b/cd/f3d07d4b95fbe1a2ef0958c15da614f7e4f557720132de34d2dc3aa7e911/coverage-7.13.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3973f353b2d70bd9796cc12f532a05945232ccae966456c8ed7034cb96bbfd6f", size = 262337, upload-time = "2026-01-25T12:59:53.407Z" }, + { url = "https://files.pythonhosted.org/packages/e0/db/b0d5b2873a07cb1e06a55d998697c0a5a540dcefbf353774c99eb3874513/coverage-7.13.2-cp314-cp314t-win32.whl", hash = "sha256:79f6506a678a59d4ded048dc72f1859ebede8ec2b9a2d509ebe161f01c2879d3", size = 222749, upload-time = "2026-01-25T12:59:56.316Z" }, + { url = 
"https://files.pythonhosted.org/packages/e5/2f/838a5394c082ac57d85f57f6aba53093b30d9089781df72412126505716f/coverage-7.13.2-cp314-cp314t-win_amd64.whl", hash = "sha256:196bfeabdccc5a020a57d5a368c681e3a6ceb0447d153aeccc1ab4d70a5032ba", size = 223857, upload-time = "2026-01-25T12:59:58.201Z" }, + { url = "https://files.pythonhosted.org/packages/44/d4/b608243e76ead3a4298824b50922b89ef793e50069ce30316a65c1b4d7ef/coverage-7.13.2-cp314-cp314t-win_arm64.whl", hash = "sha256:69269ab58783e090bfbf5b916ab3d188126e22d6070bbfc93098fdd474ef937c", size = 221881, upload-time = "2026-01-25T13:00:00.449Z" }, + { url = "https://files.pythonhosted.org/packages/d2/db/d291e30fdf7ea617a335531e72294e0c723356d7fdde8fba00610a76bda9/coverage-7.13.2-py3-none-any.whl", hash = "sha256:40ce1ea1e25125556d8e76bd0b61500839a07944cc287ac21d5626f3e620cad5", size = 210943, upload-time = "2026-01-25T13:00:02.388Z" }, ] [package.optional-dependencies] @@ -592,64 +592,61 @@ toml = [ [[package]] name = "cryptography" -version = "46.0.3" +version = "46.0.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9f/33/c00162f49c0e2fe8064a62cb92b93e50c74a72bc370ab92f86112b33ff62/cryptography-46.0.3.tar.gz", hash = "sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1", size = 749258, upload-time = "2025-10-15T23:18:31.74Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1d/42/9c391dd801d6cf0d561b5890549d4b27bafcc53b39c31a817e69d87c625b/cryptography-46.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a", size = 7225004, upload-time = "2025-10-15T23:16:52.239Z" }, - { url = "https://files.pythonhosted.org/packages/1c/67/38769ca6b65f07461eb200e85fc1639b438bdc667be02cf7f2cd6a64601c/cryptography-46.0.3-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", 
hash = "sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc", size = 4296667, upload-time = "2025-10-15T23:16:54.369Z" }, - { url = "https://files.pythonhosted.org/packages/5c/49/498c86566a1d80e978b42f0d702795f69887005548c041636df6ae1ca64c/cryptography-46.0.3-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d", size = 4450807, upload-time = "2025-10-15T23:16:56.414Z" }, - { url = "https://files.pythonhosted.org/packages/4b/0a/863a3604112174c8624a2ac3c038662d9e59970c7f926acdcfaed8d61142/cryptography-46.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb", size = 4299615, upload-time = "2025-10-15T23:16:58.442Z" }, - { url = "https://files.pythonhosted.org/packages/64/02/b73a533f6b64a69f3cd3872acb6ebc12aef924d8d103133bb3ea750dc703/cryptography-46.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849", size = 4016800, upload-time = "2025-10-15T23:17:00.378Z" }, - { url = "https://files.pythonhosted.org/packages/25/d5/16e41afbfa450cde85a3b7ec599bebefaef16b5c6ba4ec49a3532336ed72/cryptography-46.0.3-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8", size = 4984707, upload-time = "2025-10-15T23:17:01.98Z" }, - { url = "https://files.pythonhosted.org/packages/c9/56/e7e69b427c3878352c2fb9b450bd0e19ed552753491d39d7d0a2f5226d41/cryptography-46.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec", size = 4482541, upload-time = "2025-10-15T23:17:04.078Z" }, - { url = "https://files.pythonhosted.org/packages/78/f6/50736d40d97e8483172f1bb6e698895b92a223dba513b0ca6f06b2365339/cryptography-46.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = 
"sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91", size = 4299464, upload-time = "2025-10-15T23:17:05.483Z" }, - { url = "https://files.pythonhosted.org/packages/00/de/d8e26b1a855f19d9994a19c702fa2e93b0456beccbcfe437eda00e0701f2/cryptography-46.0.3-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e", size = 4950838, upload-time = "2025-10-15T23:17:07.425Z" }, - { url = "https://files.pythonhosted.org/packages/8f/29/798fc4ec461a1c9e9f735f2fc58741b0daae30688f41b2497dcbc9ed1355/cryptography-46.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926", size = 4481596, upload-time = "2025-10-15T23:17:09.343Z" }, - { url = "https://files.pythonhosted.org/packages/15/8d/03cd48b20a573adfff7652b76271078e3045b9f49387920e7f1f631d125e/cryptography-46.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71", size = 4426782, upload-time = "2025-10-15T23:17:11.22Z" }, - { url = "https://files.pythonhosted.org/packages/fa/b1/ebacbfe53317d55cf33165bda24c86523497a6881f339f9aae5c2e13e57b/cryptography-46.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac", size = 4698381, upload-time = "2025-10-15T23:17:12.829Z" }, - { url = "https://files.pythonhosted.org/packages/96/92/8a6a9525893325fc057a01f654d7efc2c64b9de90413adcf605a85744ff4/cryptography-46.0.3-cp311-abi3-win32.whl", hash = "sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018", size = 3055988, upload-time = "2025-10-15T23:17:14.65Z" }, - { url = "https://files.pythonhosted.org/packages/7e/bf/80fbf45253ea585a1e492a6a17efcb93467701fa79e71550a430c5e60df0/cryptography-46.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb", size = 3514451, upload-time = 
"2025-10-15T23:17:16.142Z" }, - { url = "https://files.pythonhosted.org/packages/2e/af/9b302da4c87b0beb9db4e756386a7c6c5b8003cd0e742277888d352ae91d/cryptography-46.0.3-cp311-abi3-win_arm64.whl", hash = "sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c", size = 2928007, upload-time = "2025-10-15T23:17:18.04Z" }, - { url = "https://files.pythonhosted.org/packages/f5/e2/a510aa736755bffa9d2f75029c229111a1d02f8ecd5de03078f4c18d91a3/cryptography-46.0.3-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:00a5e7e87938e5ff9ff5447ab086a5706a957137e6e433841e9d24f38a065217", size = 7158012, upload-time = "2025-10-15T23:17:19.982Z" }, - { url = "https://files.pythonhosted.org/packages/73/dc/9aa866fbdbb95b02e7f9d086f1fccfeebf8953509b87e3f28fff927ff8a0/cryptography-46.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c8daeb2d2174beb4575b77482320303f3d39b8e81153da4f0fb08eb5fe86a6c5", size = 4288728, upload-time = "2025-10-15T23:17:21.527Z" }, - { url = "https://files.pythonhosted.org/packages/c5/fd/bc1daf8230eaa075184cbbf5f8cd00ba9db4fd32d63fb83da4671b72ed8a/cryptography-46.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:39b6755623145ad5eff1dab323f4eae2a32a77a7abef2c5089a04a3d04366715", size = 4435078, upload-time = "2025-10-15T23:17:23.042Z" }, - { url = "https://files.pythonhosted.org/packages/82/98/d3bd5407ce4c60017f8ff9e63ffee4200ab3e23fe05b765cab805a7db008/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:db391fa7c66df6762ee3f00c95a89e6d428f4d60e7abc8328f4fe155b5ac6e54", size = 4293460, upload-time = "2025-10-15T23:17:24.885Z" }, - { url = "https://files.pythonhosted.org/packages/26/e9/e23e7900983c2b8af7a08098db406cf989d7f09caea7897e347598d4cd5b/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:78a97cf6a8839a48c49271cdcbd5cf37ca2c1d6b7fdd86cc864f302b5e9bf459", size = 3995237, upload-time = 
"2025-10-15T23:17:26.449Z" }, - { url = "https://files.pythonhosted.org/packages/91/15/af68c509d4a138cfe299d0d7ddb14afba15233223ebd933b4bbdbc7155d3/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:dfb781ff7eaa91a6f7fd41776ec37c5853c795d3b358d4896fdbb5df168af422", size = 4967344, upload-time = "2025-10-15T23:17:28.06Z" }, - { url = "https://files.pythonhosted.org/packages/ca/e3/8643d077c53868b681af077edf6b3cb58288b5423610f21c62aadcbe99f4/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:6f61efb26e76c45c4a227835ddeae96d83624fb0d29eb5df5b96e14ed1a0afb7", size = 4466564, upload-time = "2025-10-15T23:17:29.665Z" }, - { url = "https://files.pythonhosted.org/packages/0e/43/c1e8726fa59c236ff477ff2b5dc071e54b21e5a1e51aa2cee1676f1c986f/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:23b1a8f26e43f47ceb6d6a43115f33a5a37d57df4ea0ca295b780ae8546e8044", size = 4292415, upload-time = "2025-10-15T23:17:31.686Z" }, - { url = "https://files.pythonhosted.org/packages/42/f9/2f8fefdb1aee8a8e3256a0568cffc4e6d517b256a2fe97a029b3f1b9fe7e/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:b419ae593c86b87014b9be7396b385491ad7f320bde96826d0dd174459e54665", size = 4931457, upload-time = "2025-10-15T23:17:33.478Z" }, - { url = "https://files.pythonhosted.org/packages/79/30/9b54127a9a778ccd6d27c3da7563e9f2d341826075ceab89ae3b41bf5be2/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:50fc3343ac490c6b08c0cf0d704e881d0d660be923fd3076db3e932007e726e3", size = 4466074, upload-time = "2025-10-15T23:17:35.158Z" }, - { url = "https://files.pythonhosted.org/packages/ac/68/b4f4a10928e26c941b1b6a179143af9f4d27d88fe84a6a3c53592d2e76bf/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22d7e97932f511d6b0b04f2bfd818d73dcd5928db509460aaf48384778eb6d20", size = 4420569, upload-time = "2025-10-15T23:17:37.188Z" }, - { url = 
"https://files.pythonhosted.org/packages/a3/49/3746dab4c0d1979888f125226357d3262a6dd40e114ac29e3d2abdf1ec55/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d55f3dffadd674514ad19451161118fd010988540cee43d8bc20675e775925de", size = 4681941, upload-time = "2025-10-15T23:17:39.236Z" }, - { url = "https://files.pythonhosted.org/packages/fd/30/27654c1dbaf7e4a3531fa1fc77986d04aefa4d6d78259a62c9dc13d7ad36/cryptography-46.0.3-cp314-cp314t-win32.whl", hash = "sha256:8a6e050cb6164d3f830453754094c086ff2d0b2f3a897a1d9820f6139a1f0914", size = 3022339, upload-time = "2025-10-15T23:17:40.888Z" }, - { url = "https://files.pythonhosted.org/packages/f6/30/640f34ccd4d2a1bc88367b54b926b781b5a018d65f404d409aba76a84b1c/cryptography-46.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:760f83faa07f8b64e9c33fc963d790a2edb24efb479e3520c14a45741cd9b2db", size = 3494315, upload-time = "2025-10-15T23:17:42.769Z" }, - { url = "https://files.pythonhosted.org/packages/ba/8b/88cc7e3bd0a8e7b861f26981f7b820e1f46aa9d26cc482d0feba0ecb4919/cryptography-46.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:516ea134e703e9fe26bcd1277a4b59ad30586ea90c365a87781d7887a646fe21", size = 2919331, upload-time = "2025-10-15T23:17:44.468Z" }, - { url = "https://files.pythonhosted.org/packages/fd/23/45fe7f376a7df8daf6da3556603b36f53475a99ce4faacb6ba2cf3d82021/cryptography-46.0.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936", size = 7218248, upload-time = "2025-10-15T23:17:46.294Z" }, - { url = "https://files.pythonhosted.org/packages/27/32/b68d27471372737054cbd34c84981f9edbc24fe67ca225d389799614e27f/cryptography-46.0.3-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683", size = 4294089, upload-time = "2025-10-15T23:17:48.269Z" }, - { url = 
"https://files.pythonhosted.org/packages/26/42/fa8389d4478368743e24e61eea78846a0006caffaf72ea24a15159215a14/cryptography-46.0.3-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d", size = 4440029, upload-time = "2025-10-15T23:17:49.837Z" }, - { url = "https://files.pythonhosted.org/packages/5f/eb/f483db0ec5ac040824f269e93dd2bd8a21ecd1027e77ad7bdf6914f2fd80/cryptography-46.0.3-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0", size = 4297222, upload-time = "2025-10-15T23:17:51.357Z" }, - { url = "https://files.pythonhosted.org/packages/fd/cf/da9502c4e1912cb1da3807ea3618a6829bee8207456fbbeebc361ec38ba3/cryptography-46.0.3-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc", size = 4012280, upload-time = "2025-10-15T23:17:52.964Z" }, - { url = "https://files.pythonhosted.org/packages/6b/8f/9adb86b93330e0df8b3dcf03eae67c33ba89958fc2e03862ef1ac2b42465/cryptography-46.0.3-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3", size = 4978958, upload-time = "2025-10-15T23:17:54.965Z" }, - { url = "https://files.pythonhosted.org/packages/d1/a0/5fa77988289c34bdb9f913f5606ecc9ada1adb5ae870bd0d1054a7021cc4/cryptography-46.0.3-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971", size = 4473714, upload-time = "2025-10-15T23:17:56.754Z" }, - { url = "https://files.pythonhosted.org/packages/14/e5/fc82d72a58d41c393697aa18c9abe5ae1214ff6f2a5c18ac470f92777895/cryptography-46.0.3-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac", size = 4296970, upload-time = "2025-10-15T23:17:58.588Z" }, - { url = 
"https://files.pythonhosted.org/packages/78/06/5663ed35438d0b09056973994f1aec467492b33bd31da36e468b01ec1097/cryptography-46.0.3-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04", size = 4940236, upload-time = "2025-10-15T23:18:00.897Z" }, - { url = "https://files.pythonhosted.org/packages/fc/59/873633f3f2dcd8a053b8dd1d38f783043b5fce589c0f6988bf55ef57e43e/cryptography-46.0.3-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506", size = 4472642, upload-time = "2025-10-15T23:18:02.749Z" }, - { url = "https://files.pythonhosted.org/packages/3d/39/8e71f3930e40f6877737d6f69248cf74d4e34b886a3967d32f919cc50d3b/cryptography-46.0.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963", size = 4423126, upload-time = "2025-10-15T23:18:04.85Z" }, - { url = "https://files.pythonhosted.org/packages/cd/c7/f65027c2810e14c3e7268353b1681932b87e5a48e65505d8cc17c99e36ae/cryptography-46.0.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4", size = 4686573, upload-time = "2025-10-15T23:18:06.908Z" }, - { url = "https://files.pythonhosted.org/packages/0a/6e/1c8331ddf91ca4730ab3086a0f1be19c65510a33b5a441cb334e7a2d2560/cryptography-46.0.3-cp38-abi3-win32.whl", hash = "sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df", size = 3036695, upload-time = "2025-10-15T23:18:08.672Z" }, - { url = "https://files.pythonhosted.org/packages/90/45/b0d691df20633eff80955a0fc7695ff9051ffce8b69741444bd9ed7bd0db/cryptography-46.0.3-cp38-abi3-win_amd64.whl", hash = "sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f", size = 3501720, upload-time = "2025-10-15T23:18:10.632Z" }, - { url = 
"https://files.pythonhosted.org/packages/e8/cb/2da4cc83f5edb9c3257d09e1e7ab7b23f049c7962cae8d842bbef0a9cec9/cryptography-46.0.3-cp38-abi3-win_arm64.whl", hash = "sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372", size = 2918740, upload-time = "2025-10-15T23:18:12.277Z" }, - { url = "https://files.pythonhosted.org/packages/06/8a/e60e46adab4362a682cf142c7dcb5bf79b782ab2199b0dcb81f55970807f/cryptography-46.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7ce938a99998ed3c8aa7e7272dca1a610401ede816d36d0693907d863b10d9ea", size = 3698132, upload-time = "2025-10-15T23:18:17.056Z" }, - { url = "https://files.pythonhosted.org/packages/da/38/f59940ec4ee91e93d3311f7532671a5cef5570eb04a144bf203b58552d11/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:191bb60a7be5e6f54e30ba16fdfae78ad3a342a0599eb4193ba88e3f3d6e185b", size = 4243992, upload-time = "2025-10-15T23:18:18.695Z" }, - { url = "https://files.pythonhosted.org/packages/b0/0c/35b3d92ddebfdfda76bb485738306545817253d0a3ded0bfe80ef8e67aa5/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c70cc23f12726be8f8bc72e41d5065d77e4515efae3690326764ea1b07845cfb", size = 4409944, upload-time = "2025-10-15T23:18:20.597Z" }, - { url = "https://files.pythonhosted.org/packages/99/55/181022996c4063fc0e7666a47049a1ca705abb9c8a13830f074edb347495/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:9394673a9f4de09e28b5356e7fff97d778f8abad85c9d5ac4a4b7e25a0de7717", size = 4242957, upload-time = "2025-10-15T23:18:22.18Z" }, - { url = "https://files.pythonhosted.org/packages/ba/af/72cd6ef29f9c5f731251acadaeb821559fe25f10852f44a63374c9ca08c1/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:94cd0549accc38d1494e1f8de71eca837d0509d0d44bf11d158524b0e12cebf9", size = 4409447, upload-time = "2025-10-15T23:18:24.209Z" }, - { url = 
"https://files.pythonhosted.org/packages/0d/c3/e90f4a4feae6410f914f8ebac129b9ae7a8c92eb60a638012dde42030a9d/cryptography-46.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6b5063083824e5509fdba180721d55909ffacccc8adbec85268b48439423d78c", size = 3438528, upload-time = "2025-10-15T23:18:26.227Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/78/19/f748958276519adf6a0c1e79e7b8860b4830dda55ccdf29f2719b5fc499c/cryptography-46.0.4.tar.gz", hash = "sha256:bfd019f60f8abc2ed1b9be4ddc21cfef059c841d86d710bb69909a688cbb8f59", size = 749301, upload-time = "2026-01-28T00:24:37.379Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8d/99/157aae7949a5f30d51fcb1a9851e8ebd5c74bf99b5285d8bb4b8b9ee641e/cryptography-46.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:281526e865ed4166009e235afadf3a4c4cba6056f99336a99efba65336fd5485", size = 7173686, upload-time = "2026-01-28T00:23:07.515Z" }, + { url = "https://files.pythonhosted.org/packages/87/91/874b8910903159043b5c6a123b7e79c4559ddd1896e38967567942635778/cryptography-46.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f14fba5bf6f4390d7ff8f086c566454bff0411f6d8aa7af79c88b6f9267aecc", size = 4275871, upload-time = "2026-01-28T00:23:09.439Z" }, + { url = "https://files.pythonhosted.org/packages/c0/35/690e809be77896111f5b195ede56e4b4ed0435b428c2f2b6d35046fbb5e8/cryptography-46.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47bcd19517e6389132f76e2d5303ded6cf3f78903da2158a671be8de024f4cd0", size = 4423124, upload-time = "2026-01-28T00:23:11.529Z" }, + { url = "https://files.pythonhosted.org/packages/1a/5b/a26407d4f79d61ca4bebaa9213feafdd8806dc69d3d290ce24996d3cfe43/cryptography-46.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:01df4f50f314fbe7009f54046e908d1754f19d0c6d3070df1e6268c5a4af09fa", size = 4277090, upload-time = "2026-01-28T00:23:13.123Z" }, + { url = 
"https://files.pythonhosted.org/packages/0c/d8/4bb7aec442a9049827aa34cee1aa83803e528fa55da9a9d45d01d1bb933e/cryptography-46.0.4-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5aa3e463596b0087b3da0dbe2b2487e9fc261d25da85754e30e3b40637d61f81", size = 4947652, upload-time = "2026-01-28T00:23:14.554Z" }, + { url = "https://files.pythonhosted.org/packages/2b/08/f83e2e0814248b844265802d081f2fac2f1cbe6cd258e72ba14ff006823a/cryptography-46.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0a9ad24359fee86f131836a9ac3bffc9329e956624a2d379b613f8f8abaf5255", size = 4455157, upload-time = "2026-01-28T00:23:16.443Z" }, + { url = "https://files.pythonhosted.org/packages/0a/05/19d849cf4096448779d2dcc9bb27d097457dac36f7273ffa875a93b5884c/cryptography-46.0.4-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:dc1272e25ef673efe72f2096e92ae39dea1a1a450dd44918b15351f72c5a168e", size = 3981078, upload-time = "2026-01-28T00:23:17.838Z" }, + { url = "https://files.pythonhosted.org/packages/e6/89/f7bac81d66ba7cde867a743ea5b37537b32b5c633c473002b26a226f703f/cryptography-46.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:de0f5f4ec8711ebc555f54735d4c673fc34b65c44283895f1a08c2b49d2fd99c", size = 4276213, upload-time = "2026-01-28T00:23:19.257Z" }, + { url = "https://files.pythonhosted.org/packages/da/9f/7133e41f24edd827020ad21b068736e792bc68eecf66d93c924ad4719fb3/cryptography-46.0.4-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:eeeb2e33d8dbcccc34d64651f00a98cb41b2dc69cef866771a5717e6734dfa32", size = 4912190, upload-time = "2026-01-28T00:23:21.244Z" }, + { url = "https://files.pythonhosted.org/packages/a6/f7/6d43cbaddf6f65b24816e4af187d211f0bc536a29961f69faedc48501d8e/cryptography-46.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3d425eacbc9aceafd2cb429e42f4e5d5633c6f873f5e567077043ef1b9bbf616", size = 4454641, upload-time = "2026-01-28T00:23:22.866Z" }, + { url = 
"https://files.pythonhosted.org/packages/9e/4f/ebd0473ad656a0ac912a16bd07db0f5d85184924e14fc88feecae2492834/cryptography-46.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91627ebf691d1ea3976a031b61fb7bac1ccd745afa03602275dda443e11c8de0", size = 4405159, upload-time = "2026-01-28T00:23:25.278Z" }, + { url = "https://files.pythonhosted.org/packages/d1/f7/7923886f32dc47e27adeff8246e976d77258fd2aa3efdd1754e4e323bf49/cryptography-46.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2d08bc22efd73e8854b0b7caff402d735b354862f1145d7be3b9c0f740fef6a0", size = 4666059, upload-time = "2026-01-28T00:23:26.766Z" }, + { url = "https://files.pythonhosted.org/packages/eb/a7/0fca0fd3591dffc297278a61813d7f661a14243dd60f499a7a5b48acb52a/cryptography-46.0.4-cp311-abi3-win32.whl", hash = "sha256:82a62483daf20b8134f6e92898da70d04d0ef9a75829d732ea1018678185f4f5", size = 3026378, upload-time = "2026-01-28T00:23:28.317Z" }, + { url = "https://files.pythonhosted.org/packages/2d/12/652c84b6f9873f0909374864a57b003686c642ea48c84d6c7e2c515e6da5/cryptography-46.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:6225d3ebe26a55dbc8ead5ad1265c0403552a63336499564675b29eb3184c09b", size = 3478614, upload-time = "2026-01-28T00:23:30.275Z" }, + { url = "https://files.pythonhosted.org/packages/b9/27/542b029f293a5cce59349d799d4d8484b3b1654a7b9a0585c266e974a488/cryptography-46.0.4-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:485e2b65d25ec0d901bca7bcae0f53b00133bf3173916d8e421f6fddde103908", size = 7116417, upload-time = "2026-01-28T00:23:31.958Z" }, + { url = "https://files.pythonhosted.org/packages/f8/f5/559c25b77f40b6bf828eabaf988efb8b0e17b573545edb503368ca0a2a03/cryptography-46.0.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:078e5f06bd2fa5aea5a324f2a09f914b1484f1d0c2a4d6a8a28c74e72f65f2da", size = 4264508, upload-time = "2026-01-28T00:23:34.264Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/a1/551fa162d33074b660dc35c9bc3616fefa21a0e8c1edd27b92559902e408/cryptography-46.0.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dce1e4f068f03008da7fa51cc7abc6ddc5e5de3e3d1550334eaf8393982a5829", size = 4409080, upload-time = "2026-01-28T00:23:35.793Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6a/4d8d129a755f5d6df1bbee69ea2f35ebfa954fa1847690d1db2e8bca46a5/cryptography-46.0.4-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:2067461c80271f422ee7bdbe79b9b4be54a5162e90345f86a23445a0cf3fd8a2", size = 4270039, upload-time = "2026-01-28T00:23:37.263Z" }, + { url = "https://files.pythonhosted.org/packages/4c/f5/ed3fcddd0a5e39321e595e144615399e47e7c153a1fb8c4862aec3151ff9/cryptography-46.0.4-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:c92010b58a51196a5f41c3795190203ac52edfd5dc3ff99149b4659eba9d2085", size = 4926748, upload-time = "2026-01-28T00:23:38.884Z" }, + { url = "https://files.pythonhosted.org/packages/43/ae/9f03d5f0c0c00e85ecb34f06d3b79599f20630e4db91b8a6e56e8f83d410/cryptography-46.0.4-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:829c2b12bbc5428ab02d6b7f7e9bbfd53e33efd6672d21341f2177470171ad8b", size = 4442307, upload-time = "2026-01-28T00:23:40.56Z" }, + { url = "https://files.pythonhosted.org/packages/8b/22/e0f9f2dae8040695103369cf2283ef9ac8abe4d51f68710bec2afd232609/cryptography-46.0.4-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:62217ba44bf81b30abaeda1488686a04a702a261e26f87db51ff61d9d3510abd", size = 3959253, upload-time = "2026-01-28T00:23:42.827Z" }, + { url = "https://files.pythonhosted.org/packages/01/5b/6a43fcccc51dae4d101ac7d378a8724d1ba3de628a24e11bf2f4f43cba4d/cryptography-46.0.4-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:9c2da296c8d3415b93e6053f5a728649a87a48ce084a9aaf51d6e46c87c7f2d2", size = 4269372, upload-time = "2026-01-28T00:23:44.655Z" }, + { url = 
"https://files.pythonhosted.org/packages/17/b7/0f6b8c1dd0779df2b526e78978ff00462355e31c0a6f6cff8a3e99889c90/cryptography-46.0.4-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:9b34d8ba84454641a6bf4d6762d15847ecbd85c1316c0a7984e6e4e9f748ec2e", size = 4891908, upload-time = "2026-01-28T00:23:46.48Z" }, + { url = "https://files.pythonhosted.org/packages/83/17/259409b8349aa10535358807a472c6a695cf84f106022268d31cea2b6c97/cryptography-46.0.4-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:df4a817fa7138dd0c96c8c8c20f04b8aaa1fac3bbf610913dcad8ea82e1bfd3f", size = 4441254, upload-time = "2026-01-28T00:23:48.403Z" }, + { url = "https://files.pythonhosted.org/packages/9c/fe/e4a1b0c989b00cee5ffa0764401767e2d1cf59f45530963b894129fd5dce/cryptography-46.0.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b1de0ebf7587f28f9190b9cb526e901bf448c9e6a99655d2b07fff60e8212a82", size = 4396520, upload-time = "2026-01-28T00:23:50.26Z" }, + { url = "https://files.pythonhosted.org/packages/b3/81/ba8fd9657d27076eb40d6a2f941b23429a3c3d2f56f5a921d6b936a27bc9/cryptography-46.0.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9b4d17bc7bd7cdd98e3af40b441feaea4c68225e2eb2341026c84511ad246c0c", size = 4651479, upload-time = "2026-01-28T00:23:51.674Z" }, + { url = "https://files.pythonhosted.org/packages/00/03/0de4ed43c71c31e4fe954edd50b9d28d658fef56555eba7641696370a8e2/cryptography-46.0.4-cp314-cp314t-win32.whl", hash = "sha256:c411f16275b0dea722d76544a61d6421e2cc829ad76eec79280dbdc9ddf50061", size = 3001986, upload-time = "2026-01-28T00:23:53.485Z" }, + { url = "https://files.pythonhosted.org/packages/5c/70/81830b59df7682917d7a10f833c4dab2a5574cd664e86d18139f2b421329/cryptography-46.0.4-cp314-cp314t-win_amd64.whl", hash = "sha256:728fedc529efc1439eb6107b677f7f7558adab4553ef8669f0d02d42d7b959a7", size = 3468288, upload-time = "2026-01-28T00:23:55.09Z" }, + { url = 
"https://files.pythonhosted.org/packages/56/f7/f648fdbb61d0d45902d3f374217451385edc7e7768d1b03ff1d0e5ffc17b/cryptography-46.0.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a9556ba711f7c23f77b151d5798f3ac44a13455cc68db7697a1096e6d0563cab", size = 7169583, upload-time = "2026-01-28T00:23:56.558Z" }, + { url = "https://files.pythonhosted.org/packages/d8/cc/8f3224cbb2a928de7298d6ed4790f5ebc48114e02bdc9559196bfb12435d/cryptography-46.0.4-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8bf75b0259e87fa70bddc0b8b4078b76e7fd512fd9afae6c1193bcf440a4dbef", size = 4275419, upload-time = "2026-01-28T00:23:58.364Z" }, + { url = "https://files.pythonhosted.org/packages/17/43/4a18faa7a872d00e4264855134ba82d23546c850a70ff209e04ee200e76f/cryptography-46.0.4-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3c268a3490df22270955966ba236d6bc4a8f9b6e4ffddb78aac535f1a5ea471d", size = 4419058, upload-time = "2026-01-28T00:23:59.867Z" }, + { url = "https://files.pythonhosted.org/packages/ee/64/6651969409821d791ba12346a124f55e1b76f66a819254ae840a965d4b9c/cryptography-46.0.4-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:812815182f6a0c1d49a37893a303b44eaac827d7f0d582cecfc81b6427f22973", size = 4278151, upload-time = "2026-01-28T00:24:01.731Z" }, + { url = "https://files.pythonhosted.org/packages/20/0b/a7fce65ee08c3c02f7a8310cc090a732344066b990ac63a9dfd0a655d321/cryptography-46.0.4-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:a90e43e3ef65e6dcf969dfe3bb40cbf5aef0d523dff95bfa24256be172a845f4", size = 4939441, upload-time = "2026-01-28T00:24:03.175Z" }, + { url = "https://files.pythonhosted.org/packages/db/a7/20c5701e2cd3e1dfd7a19d2290c522a5f435dd30957d431dcb531d0f1413/cryptography-46.0.4-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a05177ff6296644ef2876fce50518dffb5bcdf903c85250974fc8bc85d54c0af", size = 4451617, upload-time = "2026-01-28T00:24:05.403Z" }, + { url = 
"https://files.pythonhosted.org/packages/00/dc/3e16030ea9aa47b63af6524c354933b4fb0e352257c792c4deeb0edae367/cryptography-46.0.4-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:daa392191f626d50f1b136c9b4cf08af69ca8279d110ea24f5c2700054d2e263", size = 3977774, upload-time = "2026-01-28T00:24:06.851Z" }, + { url = "https://files.pythonhosted.org/packages/42/c8/ad93f14118252717b465880368721c963975ac4b941b7ef88f3c56bf2897/cryptography-46.0.4-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e07ea39c5b048e085f15923511d8121e4a9dc45cee4e3b970ca4f0d338f23095", size = 4277008, upload-time = "2026-01-28T00:24:08.926Z" }, + { url = "https://files.pythonhosted.org/packages/00/cf/89c99698151c00a4631fbfcfcf459d308213ac29e321b0ff44ceeeac82f1/cryptography-46.0.4-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:d5a45ddc256f492ce42a4e35879c5e5528c09cd9ad12420828c972951d8e016b", size = 4903339, upload-time = "2026-01-28T00:24:12.009Z" }, + { url = "https://files.pythonhosted.org/packages/03/c3/c90a2cb358de4ac9309b26acf49b2a100957e1ff5cc1e98e6c4996576710/cryptography-46.0.4-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:6bb5157bf6a350e5b28aee23beb2d84ae6f5be390b2f8ee7ea179cda077e1019", size = 4451216, upload-time = "2026-01-28T00:24:13.975Z" }, + { url = "https://files.pythonhosted.org/packages/96/2c/8d7f4171388a10208671e181ca43cdc0e596d8259ebacbbcfbd16de593da/cryptography-46.0.4-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd5aba870a2c40f87a3af043e0dee7d9eb02d4aff88a797b48f2b43eff8c3ab4", size = 4404299, upload-time = "2026-01-28T00:24:16.169Z" }, + { url = "https://files.pythonhosted.org/packages/e9/23/cbb2036e450980f65c6e0a173b73a56ff3bccd8998965dea5cc9ddd424a5/cryptography-46.0.4-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:93d8291da8d71024379ab2cb0b5c57915300155ad42e07f76bea6ad838d7e59b", size = 4664837, upload-time = "2026-01-28T00:24:17.629Z" }, + { url = 
"https://files.pythonhosted.org/packages/0a/21/f7433d18fe6d5845329cbdc597e30caf983229c7a245bcf54afecc555938/cryptography-46.0.4-cp38-abi3-win32.whl", hash = "sha256:0563655cb3c6d05fb2afe693340bc050c30f9f34e15763361cf08e94749401fc", size = 3009779, upload-time = "2026-01-28T00:24:20.198Z" }, + { url = "https://files.pythonhosted.org/packages/3a/6a/bd2e7caa2facffedf172a45c1a02e551e6d7d4828658c9a245516a598d94/cryptography-46.0.4-cp38-abi3-win_amd64.whl", hash = "sha256:fa0900b9ef9c49728887d1576fd8d9e7e3ea872fa9b25ef9b64888adc434e976", size = 3466633, upload-time = "2026-01-28T00:24:21.851Z" }, + { url = "https://files.pythonhosted.org/packages/59/e0/f9c6c53e1f2a1c2507f00f2faba00f01d2f334b35b0fbfe5286715da2184/cryptography-46.0.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:766330cce7416c92b5e90c3bb71b1b79521760cdcfc3a6a1a182d4c9fab23d2b", size = 3476316, upload-time = "2026-01-28T00:24:24.144Z" }, + { url = "https://files.pythonhosted.org/packages/27/7a/f8d2d13227a9a1a9fe9c7442b057efecffa41f1e3c51d8622f26b9edbe8f/cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c236a44acfb610e70f6b3e1c3ca20ff24459659231ef2f8c48e879e2d32b73da", size = 4216693, upload-time = "2026-01-28T00:24:25.758Z" }, + { url = "https://files.pythonhosted.org/packages/c5/de/3787054e8f7972658370198753835d9d680f6cd4a39df9f877b57f0dd69c/cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8a15fb869670efa8f83cbffbc8753c1abf236883225aed74cd179b720ac9ec80", size = 4382765, upload-time = "2026-01-28T00:24:27.577Z" }, + { url = "https://files.pythonhosted.org/packages/8a/5f/60e0afb019973ba6a0b322e86b3d61edf487a4f5597618a430a2a15f2d22/cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:fdc3daab53b212472f1524d070735b2f0c214239df131903bae1d598016fa822", size = 4216066, upload-time = "2026-01-28T00:24:29.056Z" }, + { url = 
"https://files.pythonhosted.org/packages/81/8e/bf4a0de294f147fee66f879d9bae6f8e8d61515558e3d12785dd90eca0be/cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:44cc0675b27cadb71bdbb96099cca1fa051cd11d2ade09e5cd3a2edb929ed947", size = 4382025, upload-time = "2026-01-28T00:24:30.681Z" }, + { url = "https://files.pythonhosted.org/packages/79/f4/9ceb90cfd6a3847069b0b0b353fd3075dc69b49defc70182d8af0c4ca390/cryptography-46.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be8c01a7d5a55f9a47d1888162b76c8f49d62b234d88f0ff91a9fbebe32ffbc3", size = 3406043, upload-time = "2026-01-28T00:24:32.236Z" }, ] [[package]] @@ -692,11 +689,11 @@ wheels = [ [[package]] name = "eval-type-backport" -version = "0.3.0" +version = "0.3.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/51/23/079e39571d6dd8d90d7a369ecb55ad766efb6bae4e77389629e14458c280/eval_type_backport-0.3.0.tar.gz", hash = "sha256:1638210401e184ff17f877e9a2fa076b60b5838790f4532a21761cc2be67aea1", size = 9272, upload-time = "2025-11-13T20:56:50.845Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/a3/cafafb4558fd638aadfe4121dc6cefb8d743368c085acb2f521df0f3d9d7/eval_type_backport-0.3.1.tar.gz", hash = "sha256:57e993f7b5b69d271e37482e62f74e76a0276c82490cf8e4f0dffeb6b332d5ed", size = 9445, upload-time = "2025-12-02T11:51:42.987Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/19/d8/2a1c638d9e0aa7e269269a1a1bf423ddd94267f1a01bbe3ad03432b67dd4/eval_type_backport-0.3.0-py3-none-any.whl", hash = "sha256:975a10a0fe333c8b6260d7fdb637698c9a16c3a9e3b6eb943fee6a6f67a37fe8", size = 6061, upload-time = "2025-11-13T20:56:49.499Z" }, + { url = "https://files.pythonhosted.org/packages/cf/22/fdc2e30d43ff853720042fa15baa3e6122722be1a7950a98233ebb55cd71/eval_type_backport-0.3.1-py3-none-any.whl", hash = "sha256:279ab641905e9f11129f56a8a78f493518515b83402b860f6f06dd7c011fdfa8", size = 6063, upload-time = 
"2025-12-02T11:51:41.665Z" }, ] [[package]] @@ -710,11 +707,11 @@ sdist = { url = "https://files.pythonhosted.org/packages/0f/1e/eafcfca27a76a7674 [[package]] name = "filelock" -version = "3.20.0" +version = "3.20.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/58/46/0028a82567109b5ef6e4d2a1f04a583fb513e6cf9527fcdd09afd817deeb/filelock-3.20.0.tar.gz", hash = "sha256:711e943b4ec6be42e1d4e6690b48dc175c822967466bb31c0c293f34334c13f4", size = 18922, upload-time = "2025-10-08T18:03:50.056Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/65/ce7f1b70157833bf3cb851b556a37d4547ceafc158aa9b34b36782f23696/filelock-3.20.3.tar.gz", hash = "sha256:18c57ee915c7ec61cff0ecf7f0f869936c7c30191bb0cf406f1341778d0834e1", size = 19485, upload-time = "2026-01-09T17:55:05.421Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/76/91/7216b27286936c16f5b4d0c530087e4a54eead683e6b0b73dd0c64844af6/filelock-3.20.0-py3-none-any.whl", hash = "sha256:339b4732ffda5cd79b13f4e2711a31b0365ce445d95d243bb996273d072546a2", size = 16054, upload-time = "2025-10-08T18:03:48.35Z" }, + { url = "https://files.pythonhosted.org/packages/b5/36/7fb70f04bf00bc646cd5bb45aa9eddb15e19437a28b8fb2b4a5249fac770/filelock-3.20.3-py3-none-any.whl", hash = "sha256:4b0dda527ee31078689fc205ec4f1c1bf7d56cf88b6dc9426c4f230e46c2dce1", size = 16701, upload-time = "2026-01-09T17:55:04.334Z" }, ] [[package]] @@ -824,49 +821,68 @@ wheels = [ [[package]] name = "fsspec" -version = "2025.10.0" +version = "2026.1.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/24/7f/2747c0d332b9acfa75dc84447a066fdf812b5a6b8d30472b74d309bfe8cb/fsspec-2025.10.0.tar.gz", hash = "sha256:b6789427626f068f9a83ca4e8a3cc050850b6c0f71f99ddb4f542b8266a26a59", size = 309285, upload-time = "2025-10-30T14:58:44.036Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/d5/7d/5df2650c57d47c57232af5ef4b4fdbff182070421e405e0d62c6cdbfaa87/fsspec-2026.1.0.tar.gz", hash = "sha256:e987cb0496a0d81bba3a9d1cee62922fb395e7d4c3b575e57f547953334fe07b", size = 310496, upload-time = "2026-01-09T15:21:35.562Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/eb/02/a6b21098b1d5d6249b7c5ab69dde30108a71e4e819d4a9778f1de1d5b70d/fsspec-2025.10.0-py3-none-any.whl", hash = "sha256:7c7712353ae7d875407f97715f0e1ffcc21e33d5b24556cb1e090ae9409ec61d", size = 200966, upload-time = "2025-10-30T14:58:42.53Z" }, + { url = "https://files.pythonhosted.org/packages/01/c9/97cc5aae1648dcb851958a3ddf73ccd7dbe5650d95203ecb4d7720b4cdbf/fsspec-2026.1.0-py3-none-any.whl", hash = "sha256:cb76aa913c2285a3b49bdd5fc55b1d7c708d7208126b60f2eb8194fe1b4cbdcc", size = 201838, upload-time = "2026-01-09T15:21:34.041Z" }, ] [[package]] name = "google-auth" -version = "2.43.0" +version = "2.48.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cachetools" }, + { name = "cryptography" }, { name = "pyasn1-modules" }, { name = "rsa" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ff/ef/66d14cf0e01b08d2d51ffc3c20410c4e134a1548fc246a6081eae585a4fe/google_auth-2.43.0.tar.gz", hash = "sha256:88228eee5fc21b62a1b5fe773ca15e67778cb07dc8363adcb4a8827b52d81483", size = 296359, upload-time = "2025-11-06T00:13:36.587Z" } +sdist = { url = "https://files.pythonhosted.org/packages/0c/41/242044323fbd746615884b1c16639749e73665b718209946ebad7ba8a813/google_auth-2.48.0.tar.gz", hash = "sha256:4f7e706b0cd3208a3d940a19a822c37a476ddba5450156c3e6624a71f7c841ce", size = 326522, upload-time = "2026-01-26T19:22:47.157Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6f/d1/385110a9ae86d91cc14c5282c61fe9f4dc41c0b9f7d423c6ad77038c4448/google_auth-2.43.0-py2.py3-none-any.whl", hash = "sha256:af628ba6fa493f75c7e9dbe9373d148ca9f4399b5ea29976519e0a3848eddd16", size = 223114, upload-time = 
"2025-11-06T00:13:35.209Z" }, + { url = "https://files.pythonhosted.org/packages/83/1d/d6466de3a5249d35e832a52834115ca9d1d0de6abc22065f049707516d47/google_auth-2.48.0-py3-none-any.whl", hash = "sha256:2e2a537873d449434252a9632c28bfc268b0adb1e53f9fb62afc5333a975903f", size = 236499, upload-time = "2026-01-26T19:22:45.099Z" }, +] + +[package.optional-dependencies] +requests = [ + { name = "requests" }, ] [[package]] name = "google-genai" -version = "1.52.0" +version = "1.61.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, - { name = "google-auth" }, + { name = "distro" }, + { name = "google-auth", extra = ["requests"] }, { name = "httpx" }, { name = "pydantic" }, { name = "requests" }, + { name = "sniffio" }, { name = "tenacity" }, { name = "typing-extensions" }, { name = "websockets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/09/4e/0ad8585d05312074bb69711b2d81cfed69ce0ae441913d57bf169bed20a7/google_genai-1.52.0.tar.gz", hash = "sha256:a74e8a4b3025f23aa98d6a0f84783119012ca6c336fd68f73c5d2b11465d7fc5", size = 258743, upload-time = "2025-11-21T02:18:55.742Z" } +sdist = { url = "https://files.pythonhosted.org/packages/69/38/421cd7e70952a536be87a0249409f87297d84f523754a25b08fe94b97e7f/google_genai-1.61.0.tar.gz", hash = "sha256:5773a4e8ad5b2ebcd54a633a67d8e9c4f413032fef07977ee47ffa34a6d3bbdf", size = 489672, upload-time = "2026-01-30T20:50:27.177Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ec/66/03f663e7bca7abe9ccfebe6cb3fe7da9a118fd723a5abb278d6117e7990e/google_genai-1.52.0-py3-none-any.whl", hash = "sha256:c8352b9f065ae14b9322b949c7debab8562982f03bf71d44130cd2b798c20743", size = 261219, upload-time = "2025-11-21T02:18:54.515Z" }, + { url = "https://files.pythonhosted.org/packages/0e/87/78dd70cb59f7acf3350f53c5144a7aa7bc39c6f425cd7dc1224b59fcdac3/google_genai-1.61.0-py3-none-any.whl", hash = "sha256:cb073ef8287581476c1c3f4d8e735426ee34478e500a56deef218fa93071e3ca", size = 721948, 
upload-time = "2026-01-30T20:50:25.551Z" }, +] + +[[package]] +name = "googleapis-common-protos" +version = "1.72.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/7b/adfd75544c415c487b33061fe7ae526165241c1ea133f9a9125a56b39fd8/googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5", size = 147433, upload-time = "2025-11-06T18:29:24.087Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" }, ] [[package]] name = "groq" -version = "0.37.0" +version = "1.0.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -876,9 +892,9 @@ dependencies = [ { name = "sniffio" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/be/d6/97a982de7bcddbec3b8f57c20af395fe45a858314f132087e1ab2e9c95f0/groq-0.37.0.tar.gz", hash = "sha256:5b914f8bb13ca5097f8d3737deeed45453de1bef023ba896e8dfb167bd644452", size = 145106, upload-time = "2025-12-01T18:32:43.785Z" } +sdist = { url = "https://files.pythonhosted.org/packages/3f/12/f4099a141677fcd2ed79dcc1fcec431e60c52e0e90c9c5d935f0ffaf8c0e/groq-1.0.0.tar.gz", hash = "sha256:66cb7bb729e6eb644daac7ce8efe945e99e4eb33657f733ee6f13059ef0c25a9", size = 146068, upload-time = "2025-12-17T23:34:23.115Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/62/3c/27e981ee05d647cd55db65349508db2d9b0ee3857db680a617201ca820fa/groq-0.37.0-py3-none-any.whl", hash = "sha256:1403bf1a14a725f9240bed10501ca1ba42cfc9f73113d91ff3182d3bb6008a4c", size = 137522, upload-time = "2025-12-01T18:32:42.302Z" }, + { url = 
"https://files.pythonhosted.org/packages/4a/88/3175759d2ef30406ea721f4d837bfa1ba4339fde3b81ba8c5640a96ed231/groq-1.0.0-py3-none-any.whl", hash = "sha256:6e22bf92ffad988f01d2d4df7729add66b8fd5dbfb2154b5bbf3af245b72c731", size = 138292, upload-time = "2025-12-17T23:34:21.957Z" }, ] [[package]] @@ -949,21 +965,23 @@ wheels = [ [[package]] name = "huggingface-hub" -version = "0.36.0" +version = "1.3.5" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock" }, { name = "fsspec" }, - { name = "hf-xet", marker = "platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'" }, + { name = "hf-xet", marker = "platform_machine == 'AMD64' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'" }, + { name = "httpx" }, { name = "packaging" }, { name = "pyyaml" }, - { name = "requests" }, + { name = "shellingham" }, { name = "tqdm" }, + { name = "typer-slim" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/98/63/4910c5fa9128fdadf6a9c5ac138e8b1b6cee4ca44bf7915bbfbce4e355ee/huggingface_hub-0.36.0.tar.gz", hash = "sha256:47b3f0e2539c39bf5cde015d63b72ec49baff67b6931c3d97f3f84532e2b8d25", size = 463358, upload-time = "2025-10-23T12:12:01.413Z" } +sdist = { url = "https://files.pythonhosted.org/packages/67/e9/2658cb9bc4c72a67b7f87650e827266139befaf499095883d30dabc4d49f/huggingface_hub-1.3.5.tar.gz", hash = "sha256:8045aca8ddab35d937138f3c386c6d43a275f53437c5c64cdc9aa8408653b4ed", size = 627456, upload-time = "2026-01-29T10:34:19.687Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cb/bd/1a875e0d592d447cbc02805fd3fe0f497714d6a2583f59d14fa9ebad96eb/huggingface_hub-0.36.0-py3-none-any.whl", hash = "sha256:7bcc9ad17d5b3f07b57c78e79d527102d08313caa278a641993acddcb894548d", size = 566094, upload-time = "2025-10-23T12:11:59.557Z" }, + { url = 
"https://files.pythonhosted.org/packages/f9/84/a579b95c46fe8e319f89dc700c087596f665141575f4dcf136aaa97d856f/huggingface_hub-1.3.5-py3-none-any.whl", hash = "sha256:fe332d7f86a8af874768452295c22cd3f37730fb2463cf6cc3295e26036f8ef9", size = 536675, upload-time = "2026-01-29T10:34:17.713Z" }, ] [[package]] @@ -1012,7 +1030,7 @@ sdist = { url = "https://files.pythonhosted.org/packages/f3/ff/c9baf0997266d398a [[package]] name = "ibm-watsonx-ai" -version = "1.4.7" +version = "1.5.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cachetools" }, @@ -1026,9 +1044,9 @@ dependencies = [ { name = "tabulate" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/65/8d/49096f20333c1782d62a0405d4fe67f82c2499c0282c8b1e2ff2f85aa991/ibm_watsonx_ai-1.4.7.tar.gz", hash = "sha256:046119c62a8b24327699b39d020622d655621fc495c85b54d5db7501ac40ce20", size = 699841, upload-time = "2025-11-18T15:33:45.825Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1c/b4/5d9d54ffd2bfadd35d5f5e1bd25bf94eb33a3b2972cad2449ca1fb6ed4b8/ibm_watsonx_ai-1.5.1.tar.gz", hash = "sha256:b03948afee418c94151c8146c0d8b7b1808c80359dbc58befe12355fffbb4c0d", size = 707589, upload-time = "2026-01-29T09:15:03.531Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f5/b2/16f8be647c66e7741efb6fb0a3c7dc7a3ca9d6c44fe023480679a72fa238/ibm_watsonx_ai-1.4.7-py3-none-any.whl", hash = "sha256:e7ef74790e4e5fe5954098c84f05847395e3cb1e28f6cfeb5fb1f6da57f862c3", size = 1070546, upload-time = "2025-11-18T15:33:44.024Z" }, + { url = "https://files.pythonhosted.org/packages/98/fd/556a4229348bd2e8159d565c461b02e93485901eb5f8bc210325ce6a1d53/ibm_watsonx_ai-1.5.1-py3-none-any.whl", hash = "sha256:82072cf542fec58d338ab88c6c2e59564e6258d4eadf3d82d2e83f916dcfc99e", size = 1077190, upload-time = "2026-01-29T09:15:01.973Z" }, ] [[package]] @@ -1042,14 +1060,14 @@ wheels = [ [[package]] name = "importlib-metadata" -version = "8.7.0" +version = "8.7.1" source = { 
registry = "https://pypi.org/simple" } dependencies = [ { name = "zipp" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107, upload-time = "2025-12-21T10:00:19.278Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" }, + { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865, upload-time = "2025-12-21T10:00:18.329Z" }, ] [[package]] @@ -1084,26 +1102,26 @@ wheels = [ [[package]] name = "jaraco-context" -version = "6.0.1" +version = "6.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "backports-tarfile", marker = "python_full_version < '3.12'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/df/ad/f3777b81bf0b6e7bc7514a1656d3e637b2e8e15fab2ce3235730b3e7a4e6/jaraco_context-6.0.1.tar.gz", hash = "sha256:9bae4ea555cf0b14938dc0aee7c9f32ed303aa20a3b73e7dc80111628792d1b3", size = 13912, upload-time = "2024-08-20T03:39:27.358Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/cb/9c/a788f5bb29c61e456b8ee52ce76dbdd32fd72cd73dd67bc95f42c7a8d13c/jaraco_context-6.1.0.tar.gz", hash = "sha256:129a341b0a85a7db7879e22acd66902fda67882db771754574338898b2d5d86f", size = 15850, upload-time = "2026-01-13T02:53:53.847Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ff/db/0c52c4cf5e4bd9f5d7135ec7669a3a767af21b3a308e1ed3674881e52b62/jaraco.context-6.0.1-py3-none-any.whl", hash = "sha256:f797fc481b490edb305122c9181830a3a5b76d84ef6d1aef2fb9b47ab956f9e4", size = 6825, upload-time = "2024-08-20T03:39:25.966Z" }, + { url = "https://files.pythonhosted.org/packages/8d/48/aa685dbf1024c7bd82bede569e3a85f82c32fd3d79ba5fea578f0159571a/jaraco_context-6.1.0-py3-none-any.whl", hash = "sha256:a43b5ed85815223d0d3cfdb6d7ca0d2bc8946f28f30b6f3216bda070f68badda", size = 7065, upload-time = "2026-01-13T02:53:53.031Z" }, ] [[package]] name = "jaraco-functools" -version = "4.3.0" +version = "4.4.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "more-itertools" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f7/ed/1aa2d585304ec07262e1a83a9889880701079dde796ac7b1d1826f40c63d/jaraco_functools-4.3.0.tar.gz", hash = "sha256:cfd13ad0dd2c47a3600b439ef72d8615d482cedcff1632930d6f28924d92f294", size = 19755, upload-time = "2025-08-18T20:05:09.91Z" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/27/056e0638a86749374d6f57d0b0db39f29509cce9313cf91bdc0ac4d91084/jaraco_functools-4.4.0.tar.gz", hash = "sha256:da21933b0417b89515562656547a77b4931f98176eb173644c0d35032a33d6bb", size = 19943, upload-time = "2025-12-21T09:29:43.6Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b4/09/726f168acad366b11e420df31bf1c702a54d373a83f968d94141a8c3fde0/jaraco_functools-4.3.0-py3-none-any.whl", hash = "sha256:227ff8ed6f7b8f62c56deff101545fa7543cf2c8e7b82a7c2116e672f29c26e8", size = 10408, upload-time = "2025-08-18T20:05:08.69Z" }, + { url = 
"https://files.pythonhosted.org/packages/fd/c4/813bb09f0985cb21e959f21f2464169eca882656849adf727ac7bb7e1767/jaraco_functools-4.4.0-py3-none-any.whl", hash = "sha256:9eec1e36f45c818d9bf307c8948eb03b2b56cd44087b3cdc989abca1f20b9176", size = 10481, upload-time = "2025-12-21T09:29:42.27Z" }, ] [[package]] @@ -1223,7 +1241,7 @@ wheels = [ [[package]] name = "jsonschema" -version = "4.25.1" +version = "4.26.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "attrs" }, @@ -1231,9 +1249,9 @@ dependencies = [ { name = "referencing" }, { name = "rpds-py" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b3/fc/e067678238fa451312d4c62bf6e6cf5ec56375422aee02f9cb5f909b3047/jsonschema-4.26.0.tar.gz", hash = "sha256:0c26707e2efad8aa1bfc5b7ce170f3fccc2e4918ff85989ba9ffa9facb2be326", size = 366583, upload-time = "2026-01-07T13:41:07.246Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, + { url = "https://files.pythonhosted.org/packages/69/90/f63fb5873511e014207a475e2bb4e8b2e570d655b00ac19a9a0ca0a385ee/jsonschema-4.26.0-py3-none-any.whl", hash = "sha256:d489f15263b8d200f8387e64b4c3a75f06629559fb73deb8fdfb525f2dab50ce", size = 90630, upload-time = "2026-01-07T13:41:05.306Z" }, ] [[package]] @@ -1266,69 +1284,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/81/db/e655086b7f3a705df045bf0933bdd9c2f79bb3c97bfef1384598bb79a217/keyring-25.7.0-py3-none-any.whl", hash = 
"sha256:be4a0b195f149690c166e850609a477c532ddbfbaed96a404d4e43f8d5e2689f", size = 39160, upload-time = "2025-11-16T16:26:08.402Z" }, ] -[[package]] -name = "librt" -version = "0.6.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/37/c3/cdff3c10e2e608490dc0a310ccf11ba777b3943ad4fcead2a2ade98c21e1/librt-0.6.3.tar.gz", hash = "sha256:c724a884e642aa2bbad52bb0203ea40406ad742368a5f90da1b220e970384aae", size = 54209, upload-time = "2025-11-29T14:01:56.058Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8b/80/bc60fd16fe24910bf5974fb914778a2e8540cef55385ab2cb04a0dfe42c4/librt-0.6.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:61348cc488b18d1b1ff9f3e5fcd5ac43ed22d3e13e862489d2267c2337285c08", size = 27285, upload-time = "2025-11-29T14:00:46.626Z" }, - { url = "https://files.pythonhosted.org/packages/88/3c/26335536ed9ba097c79cffcee148393592e55758fe76d99015af3e47a6d0/librt-0.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64645b757d617ad5f98c08e07620bc488d4bced9ced91c6279cec418f16056fa", size = 27629, upload-time = "2025-11-29T14:00:47.863Z" }, - { url = "https://files.pythonhosted.org/packages/af/fd/2dcedeacfedee5d2eda23e7a49c1c12ce6221b5d58a13555f053203faafc/librt-0.6.3-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:26b8026393920320bb9a811b691d73c5981385d537ffc5b6e22e53f7b65d4122", size = 82039, upload-time = "2025-11-29T14:00:49.131Z" }, - { url = "https://files.pythonhosted.org/packages/48/ff/6aa11914b83b0dc2d489f7636942a8e3322650d0dba840db9a1b455f3caa/librt-0.6.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d998b432ed9ffccc49b820e913c8f327a82026349e9c34fa3690116f6b70770f", size = 86560, upload-time = "2025-11-29T14:00:50.403Z" }, - { url = 
"https://files.pythonhosted.org/packages/76/a1/d25af61958c2c7eb978164aeba0350719f615179ba3f428b682b9a5fdace/librt-0.6.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e18875e17ef69ba7dfa9623f2f95f3eda6f70b536079ee6d5763ecdfe6cc9040", size = 86494, upload-time = "2025-11-29T14:00:51.383Z" }, - { url = "https://files.pythonhosted.org/packages/7d/4b/40e75d3b258c801908e64b39788f9491635f9554f8717430a491385bd6f2/librt-0.6.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a218f85081fc3f70cddaed694323a1ad7db5ca028c379c214e3a7c11c0850523", size = 88914, upload-time = "2025-11-29T14:00:52.688Z" }, - { url = "https://files.pythonhosted.org/packages/97/6d/0070c81aba8a169224301c75fb5fb6c3c25ca67e6ced086584fc130d5a67/librt-0.6.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1ef42ff4edd369e84433ce9b188a64df0837f4f69e3d34d3b34d4955c599d03f", size = 86944, upload-time = "2025-11-29T14:00:53.768Z" }, - { url = "https://files.pythonhosted.org/packages/a6/94/809f38887941b7726692e0b5a083dbdc87dbb8cf893e3b286550c5f0b129/librt-0.6.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0e0f2b79993fec23a685b3e8107ba5f8675eeae286675a216da0b09574fa1e47", size = 89852, upload-time = "2025-11-29T14:00:54.71Z" }, - { url = "https://files.pythonhosted.org/packages/58/a3/b0e5b1cda675b91f1111d8ba941da455d8bfaa22f4d2d8963ba96ccb5b12/librt-0.6.3-cp311-cp311-win32.whl", hash = "sha256:fd98cacf4e0fabcd4005c452cb8a31750258a85cab9a59fb3559e8078da408d7", size = 19948, upload-time = "2025-11-29T14:00:55.989Z" }, - { url = "https://files.pythonhosted.org/packages/cc/73/70011c2b37e3be3ece3affd3abc8ebe5cda482b03fd6b3397906321a901e/librt-0.6.3-cp311-cp311-win_amd64.whl", hash = "sha256:e17b5b42c8045867ca9d1f54af00cc2275198d38de18545edaa7833d7e9e4ac8", size = 21406, upload-time = "2025-11-29T14:00:56.874Z" }, - { url = 
"https://files.pythonhosted.org/packages/91/ee/119aa759290af6ca0729edf513ca390c1afbeae60f3ecae9b9d56f25a8a9/librt-0.6.3-cp311-cp311-win_arm64.whl", hash = "sha256:87597e3d57ec0120a3e1d857a708f80c02c42ea6b00227c728efbc860f067c45", size = 20875, upload-time = "2025-11-29T14:00:57.752Z" }, - { url = "https://files.pythonhosted.org/packages/b4/2c/b59249c566f98fe90e178baf59e83f628d6c38fb8bc78319301fccda0b5e/librt-0.6.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:74418f718083009108dc9a42c21bf2e4802d49638a1249e13677585fcc9ca176", size = 27841, upload-time = "2025-11-29T14:00:58.925Z" }, - { url = "https://files.pythonhosted.org/packages/40/e8/9db01cafcd1a2872b76114c858f81cc29ce7ad606bc102020d6dabf470fb/librt-0.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:514f3f363d1ebc423357d36222c37e5c8e6674b6eae8d7195ac9a64903722057", size = 27844, upload-time = "2025-11-29T14:01:00.2Z" }, - { url = "https://files.pythonhosted.org/packages/59/4d/da449d3a7d83cc853af539dee42adc37b755d7eea4ad3880bacfd84b651d/librt-0.6.3-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cf1115207a5049d1f4b7b4b72de0e52f228d6c696803d94843907111cbf80610", size = 84091, upload-time = "2025-11-29T14:01:01.118Z" }, - { url = "https://files.pythonhosted.org/packages/ea/6c/f90306906fb6cc6eaf4725870f0347115de05431e1f96d35114392d31fda/librt-0.6.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ad8ba80cdcea04bea7b78fcd4925bfbf408961e9d8397d2ee5d3ec121e20c08c", size = 88239, upload-time = "2025-11-29T14:01:02.11Z" }, - { url = "https://files.pythonhosted.org/packages/e7/ae/473ce7b423cfac2cb503851a89d9d2195bf615f534d5912bf86feeebbee7/librt-0.6.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4018904c83eab49c814e2494b4e22501a93cdb6c9f9425533fe693c3117126f9", size = 88815, upload-time = "2025-11-29T14:01:03.114Z" }, - { url = 
"https://files.pythonhosted.org/packages/c4/6d/934df738c87fb9617cabefe4891eece585a06abe6def25b4bca3b174429d/librt-0.6.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8983c5c06ac9c990eac5eb97a9f03fe41dc7e9d7993df74d9e8682a1056f596c", size = 90598, upload-time = "2025-11-29T14:01:04.071Z" }, - { url = "https://files.pythonhosted.org/packages/72/89/eeaa124f5e0f431c2b39119550378ae817a4b1a3c93fd7122f0639336fff/librt-0.6.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d7769c579663a6f8dbf34878969ac71befa42067ce6bf78e6370bf0d1194997c", size = 88603, upload-time = "2025-11-29T14:01:05.02Z" }, - { url = "https://files.pythonhosted.org/packages/4d/ed/c60b3c1cfc27d709bc0288af428ce58543fcb5053cf3eadbc773c24257f5/librt-0.6.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d3c9a07eafdc70556f8c220da4a538e715668c0c63cabcc436a026e4e89950bf", size = 92112, upload-time = "2025-11-29T14:01:06.304Z" }, - { url = "https://files.pythonhosted.org/packages/c1/ab/f56169be5f716ef4ab0277be70bcb1874b4effc262e655d85b505af4884d/librt-0.6.3-cp312-cp312-win32.whl", hash = "sha256:38320386a48a15033da295df276aea93a92dfa94a862e06893f75ea1d8bbe89d", size = 20127, upload-time = "2025-11-29T14:01:07.283Z" }, - { url = "https://files.pythonhosted.org/packages/ff/8d/222750ce82bf95125529eaab585ac7e2829df252f3cfc05d68792fb1dd2c/librt-0.6.3-cp312-cp312-win_amd64.whl", hash = "sha256:c0ecf4786ad0404b072196b5df774b1bb23c8aacdcacb6c10b4128bc7b00bd01", size = 21545, upload-time = "2025-11-29T14:01:08.184Z" }, - { url = "https://files.pythonhosted.org/packages/72/c9/f731ddcfb72f446a92a8674c6b8e1e2242773cce43a04f41549bd8b958ff/librt-0.6.3-cp312-cp312-win_arm64.whl", hash = "sha256:9f2a6623057989ebc469cd9cc8fe436c40117a0147627568d03f84aef7854c55", size = 20946, upload-time = "2025-11-29T14:01:09.384Z" }, - { url = "https://files.pythonhosted.org/packages/dd/aa/3055dd440f8b8b3b7e8624539a0749dd8e1913e978993bcca9ce7e306231/librt-0.6.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:9e716f9012148a81f02f46a04fc4c663420c6fbfeacfac0b5e128cf43b4413d3", size = 27874, upload-time = "2025-11-29T14:01:10.615Z" }, - { url = "https://files.pythonhosted.org/packages/ef/93/226d7dd455eaa4c26712b5ccb2dfcca12831baa7f898c8ffd3a831e29fda/librt-0.6.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:669ff2495728009a96339c5ad2612569c6d8be4474e68f3f3ac85d7c3261f5f5", size = 27852, upload-time = "2025-11-29T14:01:11.535Z" }, - { url = "https://files.pythonhosted.org/packages/4e/8b/db9d51191aef4e4cc06285250affe0bb0ad8b2ed815f7ca77951655e6f02/librt-0.6.3-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:349b6873ebccfc24c9efd244e49da9f8a5c10f60f07575e248921aae2123fc42", size = 84264, upload-time = "2025-11-29T14:01:12.461Z" }, - { url = "https://files.pythonhosted.org/packages/8d/53/297c96bda3b5a73bdaf748f1e3ae757edd29a0a41a956b9c10379f193417/librt-0.6.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0c74c26736008481c9f6d0adf1aedb5a52aff7361fea98276d1f965c0256ee70", size = 88432, upload-time = "2025-11-29T14:01:13.405Z" }, - { url = "https://files.pythonhosted.org/packages/54/3a/c005516071123278e340f22de72fa53d51e259d49215295c212da16c4dc2/librt-0.6.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:408a36ddc75e91918cb15b03460bdc8a015885025d67e68c6f78f08c3a88f522", size = 89014, upload-time = "2025-11-29T14:01:14.373Z" }, - { url = "https://files.pythonhosted.org/packages/8e/9b/ea715f818d926d17b94c80a12d81a79e95c44f52848e61e8ca1ff29bb9a9/librt-0.6.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e61ab234624c9ffca0248a707feffe6fac2343758a36725d8eb8a6efef0f8c30", size = 90807, upload-time = "2025-11-29T14:01:15.377Z" }, - { url = "https://files.pythonhosted.org/packages/f0/fc/4e2e4c87e002fa60917a8e474fd13c4bac9a759df82be3778573bb1ab954/librt-0.6.3-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:324462fe7e3896d592b967196512491ec60ca6e49c446fe59f40743d08c97917", size = 88890, upload-time = "2025-11-29T14:01:16.633Z" }, - { url = "https://files.pythonhosted.org/packages/70/7f/c7428734fbdfd4db3d5b9237fc3a857880b2ace66492836f6529fef25d92/librt-0.6.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:36b2ec8c15030002c7f688b4863e7be42820d7c62d9c6eece3db54a2400f0530", size = 92300, upload-time = "2025-11-29T14:01:17.658Z" }, - { url = "https://files.pythonhosted.org/packages/f9/0c/738c4824fdfe74dc0f95d5e90ef9e759d4ecf7fd5ba964d54a7703322251/librt-0.6.3-cp313-cp313-win32.whl", hash = "sha256:25b1b60cb059471c0c0c803e07d0dfdc79e41a0a122f288b819219ed162672a3", size = 20159, upload-time = "2025-11-29T14:01:18.61Z" }, - { url = "https://files.pythonhosted.org/packages/f2/95/93d0e61bc617306ecf4c54636b5cbde4947d872563565c4abdd9d07a39d3/librt-0.6.3-cp313-cp313-win_amd64.whl", hash = "sha256:10a95ad074e2a98c9e4abc7f5b7d40e5ecbfa84c04c6ab8a70fabf59bd429b88", size = 21484, upload-time = "2025-11-29T14:01:19.506Z" }, - { url = "https://files.pythonhosted.org/packages/10/23/abd7ace79ab54d1dbee265f13529266f686a7ce2d21ab59a992f989009b6/librt-0.6.3-cp313-cp313-win_arm64.whl", hash = "sha256:17000df14f552e86877d67e4ab7966912224efc9368e998c96a6974a8d609bf9", size = 20935, upload-time = "2025-11-29T14:01:20.415Z" }, - { url = "https://files.pythonhosted.org/packages/83/14/c06cb31152182798ed98be73f54932ab984894f5a8fccf9b73130897a938/librt-0.6.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8e695f25d1a425ad7a272902af8ab8c8d66c1998b177e4b5f5e7b4e215d0c88a", size = 27566, upload-time = "2025-11-29T14:01:21.609Z" }, - { url = "https://files.pythonhosted.org/packages/0c/b1/ce83ca7b057b06150519152f53a0b302d7c33c8692ce2f01f669b5a819d9/librt-0.6.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3e84a4121a7ae360ca4da436548a9c1ca8ca134a5ced76c893cc5944426164bd", size = 27753, upload-time = "2025-11-29T14:01:22.558Z" }, - { url = 
"https://files.pythonhosted.org/packages/3b/ec/739a885ef0a2839b6c25f1b01c99149d2cb6a34e933ffc8c051fcd22012e/librt-0.6.3-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:05f385a414de3f950886ea0aad8f109650d4b712cf9cc14cc17f5f62a9ab240b", size = 83178, upload-time = "2025-11-29T14:01:23.555Z" }, - { url = "https://files.pythonhosted.org/packages/db/bd/dc18bb1489d48c0911b9f4d72eae2d304ea264e215ba80f1e6ba4a9fc41d/librt-0.6.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36a8e337461150b05ca2c7bdedb9e591dfc262c5230422cea398e89d0c746cdc", size = 87266, upload-time = "2025-11-29T14:01:24.532Z" }, - { url = "https://files.pythonhosted.org/packages/94/f3/d0c5431b39eef15e48088b2d739ad84b17c2f1a22c0345c6d4c4a42b135e/librt-0.6.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcbe48f6a03979384f27086484dc2a14959be1613cb173458bd58f714f2c48f3", size = 87623, upload-time = "2025-11-29T14:01:25.798Z" }, - { url = "https://files.pythonhosted.org/packages/3b/15/9a52e90834e4bd6ee16cdbaf551cb32227cbaad27398391a189c489318bc/librt-0.6.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4bca9e4c260233fba37b15c4ec2f78aa99c1a79fbf902d19dd4a763c5c3fb751", size = 89436, upload-time = "2025-11-29T14:01:26.769Z" }, - { url = "https://files.pythonhosted.org/packages/c3/8a/a7e78e46e8486e023c50f21758930ef4793999115229afd65de69e94c9cc/librt-0.6.3-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:760c25ed6ac968e24803eb5f7deb17ce026902d39865e83036bacbf5cf242aa8", size = 87540, upload-time = "2025-11-29T14:01:27.756Z" }, - { url = "https://files.pythonhosted.org/packages/49/01/93799044a1cccac31f1074b07c583e181829d240539657e7f305ae63ae2a/librt-0.6.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:4aa4a93a353ccff20df6e34fa855ae8fd788832c88f40a9070e3ddd3356a9f0e", size = 90597, upload-time = "2025-11-29T14:01:29.35Z" }, - { url = 
"https://files.pythonhosted.org/packages/a7/29/00c7f58b8f8eb1bad6529ffb6c9cdcc0890a27dac59ecda04f817ead5277/librt-0.6.3-cp314-cp314-win32.whl", hash = "sha256:cb92741c2b4ea63c09609b064b26f7f5d9032b61ae222558c55832ec3ad0bcaf", size = 18955, upload-time = "2025-11-29T14:01:30.325Z" }, - { url = "https://files.pythonhosted.org/packages/d7/13/2739e6e197a9f751375a37908a6a5b0bff637b81338497a1bcb5817394da/librt-0.6.3-cp314-cp314-win_amd64.whl", hash = "sha256:fdcd095b1b812d756fa5452aca93b962cf620694c0cadb192cec2bb77dcca9a2", size = 20263, upload-time = "2025-11-29T14:01:31.287Z" }, - { url = "https://files.pythonhosted.org/packages/e1/73/393868fc2158705ea003114a24e73bb10b03bda31e9ad7b5c5ec6575338b/librt-0.6.3-cp314-cp314-win_arm64.whl", hash = "sha256:822ca79e28720a76a935c228d37da6579edef048a17cd98d406a2484d10eda78", size = 19575, upload-time = "2025-11-29T14:01:32.229Z" }, - { url = "https://files.pythonhosted.org/packages/48/6d/3c8ff3dec21bf804a205286dd63fd28dcdbe00b8dd7eb7ccf2e21a40a0b0/librt-0.6.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:078cd77064d1640cb7b0650871a772956066174d92c8aeda188a489b58495179", size = 28732, upload-time = "2025-11-29T14:01:33.165Z" }, - { url = "https://files.pythonhosted.org/packages/f4/90/e214b8b4aa34ed3d3f1040719c06c4d22472c40c5ef81a922d5af7876eb4/librt-0.6.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5cc22f7f5c0cc50ed69f4b15b9c51d602aabc4500b433aaa2ddd29e578f452f7", size = 29065, upload-time = "2025-11-29T14:01:34.088Z" }, - { url = "https://files.pythonhosted.org/packages/ab/90/ef61ed51f0a7770cc703422d907a757bbd8811ce820c333d3db2fd13542a/librt-0.6.3-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:14b345eb7afb61b9fdcdfda6738946bd11b8e0f6be258666b0646af3b9bb5916", size = 93703, upload-time = "2025-11-29T14:01:35.057Z" }, - { url = 
"https://files.pythonhosted.org/packages/a8/ae/c30bb119c35962cbe9a908a71da99c168056fc3f6e9bbcbc157d0b724d89/librt-0.6.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6d46aa46aa29b067f0b8b84f448fd9719aaf5f4c621cc279164d76a9dc9ab3e8", size = 98890, upload-time = "2025-11-29T14:01:36.031Z" }, - { url = "https://files.pythonhosted.org/packages/d1/96/47a4a78d252d36f072b79d592df10600d379a895c3880c8cbd2ac699f0ad/librt-0.6.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1b51ba7d9d5d9001494769eca8c0988adce25d0a970c3ba3f2eb9df9d08036fc", size = 98255, upload-time = "2025-11-29T14:01:37.058Z" }, - { url = "https://files.pythonhosted.org/packages/e5/28/779b5cc3cd9987683884eb5f5672e3251676bebaaae6b7da1cf366eb1da1/librt-0.6.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ced0925a18fddcff289ef54386b2fc230c5af3c83b11558571124bfc485b8c07", size = 100769, upload-time = "2025-11-29T14:01:38.413Z" }, - { url = "https://files.pythonhosted.org/packages/28/d7/771755e57c375cb9d25a4e106f570607fd856e2cb91b02418db1db954796/librt-0.6.3-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:6bac97e51f66da2ca012adddbe9fd656b17f7368d439de30898f24b39512f40f", size = 98580, upload-time = "2025-11-29T14:01:39.459Z" }, - { url = "https://files.pythonhosted.org/packages/d0/ec/8b157eb8fbc066339a2f34b0aceb2028097d0ed6150a52e23284a311eafe/librt-0.6.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b2922a0e8fa97395553c304edc3bd36168d8eeec26b92478e292e5d4445c1ef0", size = 101706, upload-time = "2025-11-29T14:01:40.474Z" }, - { url = "https://files.pythonhosted.org/packages/82/a8/4aaead9a06c795a318282aebf7d3e3e578fa889ff396e1b640c3be4c7806/librt-0.6.3-cp314-cp314t-win32.whl", hash = "sha256:f33462b19503ba68d80dac8a1354402675849259fb3ebf53b67de86421735a3a", size = 19465, upload-time = "2025-11-29T14:01:41.77Z" }, - { url = 
"https://files.pythonhosted.org/packages/3a/61/b7e6a02746c1731670c19ba07d86da90b1ae45d29e405c0b5615abf97cde/librt-0.6.3-cp314-cp314t-win_amd64.whl", hash = "sha256:04f8ce401d4f6380cfc42af0f4e67342bf34c820dae01343f58f472dbac75dcf", size = 21042, upload-time = "2025-11-29T14:01:42.865Z" }, - { url = "https://files.pythonhosted.org/packages/0e/3d/72cc9ec90bb80b5b1a65f0bb74a0f540195837baaf3b98c7fa4a7aa9718e/librt-0.6.3-cp314-cp314t-win_arm64.whl", hash = "sha256:afb39550205cc5e5c935762c6bf6a2bb34f7d21a68eadb25e2db7bf3593fecc0", size = 20246, upload-time = "2025-11-29T14:01:44.13Z" }, -] - [[package]] name = "lomond" version = "0.3.3" @@ -1429,10 +1384,11 @@ wheels = [ [[package]] name = "mcp-cli" -version = "0.11" +version = "0.12" source = { editable = "." } dependencies = [ { name = "asyncio" }, + { name = "chuk-ai-session-manager" }, { name = "chuk-llm" }, { name = "chuk-mcp-client-oauth" }, { name = "chuk-term" }, @@ -1462,9 +1418,7 @@ vault = [ [package.dev-dependencies] dev = [ { name = "colorama" }, - { name = "mypy" }, { name = "pydantic" }, - { name = "pytest" }, { name = "pytest-asyncio" }, { name = "pytest-cov" }, { name = "ruff" }, @@ -1474,10 +1428,11 @@ dev = [ requires-dist = [ { name = "asyncio", specifier = ">=3.4.3" }, { name = "asyncio", marker = "extra == 'dev'", specifier = ">=3.4.3" }, - { name = "chuk-llm", specifier = ">=0.14.3" }, + { name = "chuk-ai-session-manager", specifier = ">=0.8" }, + { name = "chuk-llm", specifier = ">=0.17.1" }, { name = "chuk-mcp-client-oauth", specifier = ">=0.3.5" }, - { name = "chuk-term", specifier = ">=0.1.3" }, - { name = "chuk-tool-processor", specifier = ">=0.11.2" }, + { name = "chuk-term", specifier = ">=0.3" }, + { name = "chuk-tool-processor", specifier = ">=0.18" }, { name = "cryptography", specifier = ">=44.0.0" }, { name = "fast-json", specifier = ">=0.3.2" }, { name = "httpx", specifier = ">=0.27.0" }, @@ -1497,9 +1452,7 @@ provides-extras = ["vault", "wasm", "dev"] [package.metadata.requires-dev] dev 
= [ { name = "colorama", specifier = ">=0.4.6" }, - { name = "mypy", specifier = ">=1.13.0" }, { name = "pydantic", specifier = ">=2.10.2" }, - { name = "pytest", specifier = ">=8.3.4" }, { name = "pytest-asyncio", specifier = ">=0.25.3" }, { name = "pytest-cov", specifier = ">=6.2.1" }, { name = "ruff", specifier = ">=0.12.10" }, @@ -1516,20 +1469,24 @@ wheels = [ [[package]] name = "mistralai" -version = "1.9.11" +version = "1.11.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "eval-type-backport" }, { name = "httpx" }, { name = "invoke" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-http" }, + { name = "opentelemetry-sdk" }, + { name = "opentelemetry-semantic-conventions" }, { name = "pydantic" }, { name = "python-dateutil" }, { name = "pyyaml" }, { name = "typing-inspection" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5a/8d/d8b7af67a966b6f227024e1cb7287fc19901a434f87a5a391dcfe635d338/mistralai-1.9.11.tar.gz", hash = "sha256:3df9e403c31a756ec79e78df25ee73cea3eb15f86693773e16b16adaf59c9b8a", size = 208051, upload-time = "2025-10-02T15:53:40.473Z" } +sdist = { url = "https://files.pythonhosted.org/packages/3f/65/79f7b19645745fd93f26dcaa1f4e1191ad12d60974c756d5af630b98056b/mistralai-1.11.1.tar.gz", hash = "sha256:0731e8204ab0ff7571a29f722b6547f2ca72541685e3ad426cd2946dba8b84fa", size = 234988, upload-time = "2026-01-29T13:04:28.494Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fe/76/4ce12563aea5a76016f8643eff30ab731e6656c845e9e4d090ef10c7b925/mistralai-1.9.11-py3-none-any.whl", hash = "sha256:7a3dc2b8ef3fceaa3582220234261b5c4e3e03a972563b07afa150e44a25a6d3", size = 442796, upload-time = "2025-10-02T15:53:39.134Z" }, + { url = "https://files.pythonhosted.org/packages/f9/26/71cca7ceb9d5956511a560c98ba48562bf45ab6dd4dc0a026d2298ee60cf/mistralai-1.11.1-py3-none-any.whl", hash = "sha256:c362ccd8840448bf3dba7c88ebd9683d076f0288298dd87a51e4cbdee6eafac1", size = 
487293, upload-time = "2026-01-29T13:04:27.217Z" }, ] [[package]] @@ -1543,248 +1500,198 @@ wheels = [ [[package]] name = "multidict" -version = "6.7.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/80/1e/5492c365f222f907de1039b91f922b93fa4f764c713ee858d235495d8f50/multidict-6.7.0.tar.gz", hash = "sha256:c6e99d9a65ca282e578dfea819cfa9c0a62b2499d8677392e09feaf305e9e6f5", size = 101834, upload-time = "2025-10-06T14:52:30.657Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/34/9e/5c727587644d67b2ed479041e4b1c58e30afc011e3d45d25bbe35781217c/multidict-6.7.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4d409aa42a94c0b3fa617708ef5276dfe81012ba6753a0370fcc9d0195d0a1fc", size = 76604, upload-time = "2025-10-06T14:48:54.277Z" }, - { url = "https://files.pythonhosted.org/packages/17/e4/67b5c27bd17c085a5ea8f1ec05b8a3e5cba0ca734bfcad5560fb129e70ca/multidict-6.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14c9e076eede3b54c636f8ce1c9c252b5f057c62131211f0ceeec273810c9721", size = 44715, upload-time = "2025-10-06T14:48:55.445Z" }, - { url = "https://files.pythonhosted.org/packages/4d/e1/866a5d77be6ea435711bef2a4291eed11032679b6b28b56b4776ab06ba3e/multidict-6.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4c09703000a9d0fa3c3404b27041e574cc7f4df4c6563873246d0e11812a94b6", size = 44332, upload-time = "2025-10-06T14:48:56.706Z" }, - { url = "https://files.pythonhosted.org/packages/31/61/0c2d50241ada71ff61a79518db85ada85fdabfcf395d5968dae1cbda04e5/multidict-6.7.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a265acbb7bb33a3a2d626afbe756371dce0279e7b17f4f4eda406459c2b5ff1c", size = 245212, upload-time = "2025-10-06T14:48:58.042Z" }, - { url = 
"https://files.pythonhosted.org/packages/ac/e0/919666a4e4b57fff1b57f279be1c9316e6cdc5de8a8b525d76f6598fefc7/multidict-6.7.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51cb455de290ae462593e5b1cb1118c5c22ea7f0d3620d9940bf695cea5a4bd7", size = 246671, upload-time = "2025-10-06T14:49:00.004Z" }, - { url = "https://files.pythonhosted.org/packages/a1/cc/d027d9c5a520f3321b65adea289b965e7bcbd2c34402663f482648c716ce/multidict-6.7.0-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:db99677b4457c7a5c5a949353e125ba72d62b35f74e26da141530fbb012218a7", size = 225491, upload-time = "2025-10-06T14:49:01.393Z" }, - { url = "https://files.pythonhosted.org/packages/75/c4/bbd633980ce6155a28ff04e6a6492dd3335858394d7bb752d8b108708558/multidict-6.7.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f470f68adc395e0183b92a2f4689264d1ea4b40504a24d9882c27375e6662bb9", size = 257322, upload-time = "2025-10-06T14:49:02.745Z" }, - { url = "https://files.pythonhosted.org/packages/4c/6d/d622322d344f1f053eae47e033b0b3f965af01212de21b10bcf91be991fb/multidict-6.7.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0db4956f82723cc1c270de9c6e799b4c341d327762ec78ef82bb962f79cc07d8", size = 254694, upload-time = "2025-10-06T14:49:04.15Z" }, - { url = "https://files.pythonhosted.org/packages/a8/9f/78f8761c2705d4c6d7516faed63c0ebdac569f6db1bef95e0d5218fdc146/multidict-6.7.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3e56d780c238f9e1ae66a22d2adf8d16f485381878250db8d496623cd38b22bd", size = 246715, upload-time = "2025-10-06T14:49:05.967Z" }, - { url = "https://files.pythonhosted.org/packages/78/59/950818e04f91b9c2b95aab3d923d9eabd01689d0dcd889563988e9ea0fd8/multidict-6.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:9d14baca2ee12c1a64740d4531356ba50b82543017f3ad6de0deb943c5979abb", size = 243189, upload-time = "2025-10-06T14:49:07.37Z" }, - { url = "https://files.pythonhosted.org/packages/7a/3d/77c79e1934cad2ee74991840f8a0110966d9599b3af95964c0cd79bb905b/multidict-6.7.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:295a92a76188917c7f99cda95858c822f9e4aae5824246bba9b6b44004ddd0a6", size = 237845, upload-time = "2025-10-06T14:49:08.759Z" }, - { url = "https://files.pythonhosted.org/packages/63/1b/834ce32a0a97a3b70f86437f685f880136677ac00d8bce0027e9fd9c2db7/multidict-6.7.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:39f1719f57adbb767ef592a50ae5ebb794220d1188f9ca93de471336401c34d2", size = 246374, upload-time = "2025-10-06T14:49:10.574Z" }, - { url = "https://files.pythonhosted.org/packages/23/ef/43d1c3ba205b5dec93dc97f3fba179dfa47910fc73aaaea4f7ceb41cec2a/multidict-6.7.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:0a13fb8e748dfc94749f622de065dd5c1def7e0d2216dba72b1d8069a389c6ff", size = 253345, upload-time = "2025-10-06T14:49:12.331Z" }, - { url = "https://files.pythonhosted.org/packages/6b/03/eaf95bcc2d19ead522001f6a650ef32811aa9e3624ff0ad37c445c7a588c/multidict-6.7.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e3aa16de190d29a0ea1b48253c57d99a68492c8dd8948638073ab9e74dc9410b", size = 246940, upload-time = "2025-10-06T14:49:13.821Z" }, - { url = "https://files.pythonhosted.org/packages/e8/df/ec8a5fd66ea6cd6f525b1fcbb23511b033c3e9bc42b81384834ffa484a62/multidict-6.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a048ce45dcdaaf1defb76b2e684f997fb5abf74437b6cb7b22ddad934a964e34", size = 242229, upload-time = "2025-10-06T14:49:15.603Z" }, - { url = "https://files.pythonhosted.org/packages/8a/a2/59b405d59fd39ec86d1142630e9049243015a5f5291ba49cadf3c090c541/multidict-6.7.0-cp311-cp311-win32.whl", hash = "sha256:a90af66facec4cebe4181b9e62a68be65e45ac9b52b67de9eec118701856e7ff", size = 41308, upload-time = "2025-10-06T14:49:16.871Z" }, 
- { url = "https://files.pythonhosted.org/packages/32/0f/13228f26f8b882c34da36efa776c3b7348455ec383bab4a66390e42963ae/multidict-6.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:95b5ffa4349df2887518bb839409bcf22caa72d82beec453216802f475b23c81", size = 46037, upload-time = "2025-10-06T14:49:18.457Z" }, - { url = "https://files.pythonhosted.org/packages/84/1f/68588e31b000535a3207fd3c909ebeec4fb36b52c442107499c18a896a2a/multidict-6.7.0-cp311-cp311-win_arm64.whl", hash = "sha256:329aa225b085b6f004a4955271a7ba9f1087e39dcb7e65f6284a988264a63912", size = 43023, upload-time = "2025-10-06T14:49:19.648Z" }, - { url = "https://files.pythonhosted.org/packages/c2/9e/9f61ac18d9c8b475889f32ccfa91c9f59363480613fc807b6e3023d6f60b/multidict-6.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8a3862568a36d26e650a19bb5cbbba14b71789032aebc0423f8cc5f150730184", size = 76877, upload-time = "2025-10-06T14:49:20.884Z" }, - { url = "https://files.pythonhosted.org/packages/38/6f/614f09a04e6184f8824268fce4bc925e9849edfa654ddd59f0b64508c595/multidict-6.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:960c60b5849b9b4f9dcc9bea6e3626143c252c74113df2c1540aebce70209b45", size = 45467, upload-time = "2025-10-06T14:49:22.054Z" }, - { url = "https://files.pythonhosted.org/packages/b3/93/c4f67a436dd026f2e780c433277fff72be79152894d9fc36f44569cab1a6/multidict-6.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2049be98fb57a31b4ccf870bf377af2504d4ae35646a19037ec271e4c07998aa", size = 43834, upload-time = "2025-10-06T14:49:23.566Z" }, - { url = "https://files.pythonhosted.org/packages/7f/f5/013798161ca665e4a422afbc5e2d9e4070142a9ff8905e482139cd09e4d0/multidict-6.7.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0934f3843a1860dd465d38895c17fce1f1cb37295149ab05cd1b9a03afacb2a7", size = 250545, upload-time = "2025-10-06T14:49:24.882Z" }, - { url = 
"https://files.pythonhosted.org/packages/71/2f/91dbac13e0ba94669ea5119ba267c9a832f0cb65419aca75549fcf09a3dc/multidict-6.7.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b3e34f3a1b8131ba06f1a73adab24f30934d148afcd5f5de9a73565a4404384e", size = 258305, upload-time = "2025-10-06T14:49:26.778Z" }, - { url = "https://files.pythonhosted.org/packages/ef/b0/754038b26f6e04488b48ac621f779c341338d78503fb45403755af2df477/multidict-6.7.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:efbb54e98446892590dc2458c19c10344ee9a883a79b5cec4bc34d6656e8d546", size = 242363, upload-time = "2025-10-06T14:49:28.562Z" }, - { url = "https://files.pythonhosted.org/packages/87/15/9da40b9336a7c9fa606c4cf2ed80a649dffeb42b905d4f63a1d7eb17d746/multidict-6.7.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a35c5fc61d4f51eb045061e7967cfe3123d622cd500e8868e7c0c592a09fedc4", size = 268375, upload-time = "2025-10-06T14:49:29.96Z" }, - { url = "https://files.pythonhosted.org/packages/82/72/c53fcade0cc94dfaad583105fd92b3a783af2091eddcb41a6d5a52474000/multidict-6.7.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:29fe6740ebccba4175af1b9b87bf553e9c15cd5868ee967e010efcf94e4fd0f1", size = 269346, upload-time = "2025-10-06T14:49:31.404Z" }, - { url = "https://files.pythonhosted.org/packages/0d/e2/9baffdae21a76f77ef8447f1a05a96ec4bc0a24dae08767abc0a2fe680b8/multidict-6.7.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:123e2a72e20537add2f33a79e605f6191fba2afda4cbb876e35c1a7074298a7d", size = 256107, upload-time = "2025-10-06T14:49:32.974Z" }, - { url = "https://files.pythonhosted.org/packages/3c/06/3f06f611087dc60d65ef775f1fb5aca7c6d61c6db4990e7cda0cef9b1651/multidict-6.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:b284e319754366c1aee2267a2036248b24eeb17ecd5dc16022095e747f2f4304", size = 253592, upload-time = "2025-10-06T14:49:34.52Z" }, - { url = "https://files.pythonhosted.org/packages/20/24/54e804ec7945b6023b340c412ce9c3f81e91b3bf5fa5ce65558740141bee/multidict-6.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:803d685de7be4303b5a657b76e2f6d1240e7e0a8aa2968ad5811fa2285553a12", size = 251024, upload-time = "2025-10-06T14:49:35.956Z" }, - { url = "https://files.pythonhosted.org/packages/14/48/011cba467ea0b17ceb938315d219391d3e421dfd35928e5dbdc3f4ae76ef/multidict-6.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c04a328260dfd5db8c39538f999f02779012268f54614902d0afc775d44e0a62", size = 251484, upload-time = "2025-10-06T14:49:37.631Z" }, - { url = "https://files.pythonhosted.org/packages/0d/2f/919258b43bb35b99fa127435cfb2d91798eb3a943396631ef43e3720dcf4/multidict-6.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8a19cdb57cd3df4cd865849d93ee14920fb97224300c88501f16ecfa2604b4e0", size = 263579, upload-time = "2025-10-06T14:49:39.502Z" }, - { url = "https://files.pythonhosted.org/packages/31/22/a0e884d86b5242b5a74cf08e876bdf299e413016b66e55511f7a804a366e/multidict-6.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b2fd74c52accced7e75de26023b7dccee62511a600e62311b918ec5c168fc2a", size = 259654, upload-time = "2025-10-06T14:49:41.32Z" }, - { url = "https://files.pythonhosted.org/packages/b2/e5/17e10e1b5c5f5a40f2fcbb45953c9b215f8a4098003915e46a93f5fcaa8f/multidict-6.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3e8bfdd0e487acf992407a140d2589fe598238eaeffa3da8448d63a63cd363f8", size = 251511, upload-time = "2025-10-06T14:49:46.021Z" }, - { url = "https://files.pythonhosted.org/packages/e3/9a/201bb1e17e7af53139597069c375e7b0dcbd47594604f65c2d5359508566/multidict-6.7.0-cp312-cp312-win32.whl", hash = "sha256:dd32a49400a2c3d52088e120ee00c1e3576cbff7e10b98467962c74fdb762ed4", size = 41895, upload-time = "2025-10-06T14:49:48.718Z" }, - 
{ url = "https://files.pythonhosted.org/packages/46/e2/348cd32faad84eaf1d20cce80e2bb0ef8d312c55bca1f7fa9865e7770aaf/multidict-6.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:92abb658ef2d7ef22ac9f8bb88e8b6c3e571671534e029359b6d9e845923eb1b", size = 46073, upload-time = "2025-10-06T14:49:50.28Z" }, - { url = "https://files.pythonhosted.org/packages/25/ec/aad2613c1910dce907480e0c3aa306905830f25df2e54ccc9dea450cb5aa/multidict-6.7.0-cp312-cp312-win_arm64.whl", hash = "sha256:490dab541a6a642ce1a9d61a4781656b346a55c13038f0b1244653828e3a83ec", size = 43226, upload-time = "2025-10-06T14:49:52.304Z" }, - { url = "https://files.pythonhosted.org/packages/d2/86/33272a544eeb36d66e4d9a920602d1a2f57d4ebea4ef3cdfe5a912574c95/multidict-6.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bee7c0588aa0076ce77c0ea5d19a68d76ad81fcd9fe8501003b9a24f9d4000f6", size = 76135, upload-time = "2025-10-06T14:49:54.26Z" }, - { url = "https://files.pythonhosted.org/packages/91/1c/eb97db117a1ebe46d457a3d235a7b9d2e6dcab174f42d1b67663dd9e5371/multidict-6.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7ef6b61cad77091056ce0e7ce69814ef72afacb150b7ac6a3e9470def2198159", size = 45117, upload-time = "2025-10-06T14:49:55.82Z" }, - { url = "https://files.pythonhosted.org/packages/f1/d8/6c3442322e41fb1dd4de8bd67bfd11cd72352ac131f6368315617de752f1/multidict-6.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c0359b1ec12b1d6849c59f9d319610b7f20ef990a6d454ab151aa0e3b9f78ca", size = 43472, upload-time = "2025-10-06T14:49:57.048Z" }, - { url = "https://files.pythonhosted.org/packages/75/3f/e2639e80325af0b6c6febdf8e57cc07043ff15f57fa1ef808f4ccb5ac4cd/multidict-6.7.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cd240939f71c64bd658f186330603aac1a9a81bf6273f523fca63673cb7378a8", size = 249342, upload-time = "2025-10-06T14:49:58.368Z" }, - { url = 
"https://files.pythonhosted.org/packages/5d/cc/84e0585f805cbeaa9cbdaa95f9a3d6aed745b9d25700623ac89a6ecff400/multidict-6.7.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a60a4d75718a5efa473ebd5ab685786ba0c67b8381f781d1be14da49f1a2dc60", size = 257082, upload-time = "2025-10-06T14:49:59.89Z" }, - { url = "https://files.pythonhosted.org/packages/b0/9c/ac851c107c92289acbbf5cfb485694084690c1b17e555f44952c26ddc5bd/multidict-6.7.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53a42d364f323275126aff81fb67c5ca1b7a04fda0546245730a55c8c5f24bc4", size = 240704, upload-time = "2025-10-06T14:50:01.485Z" }, - { url = "https://files.pythonhosted.org/packages/50/cc/5f93e99427248c09da95b62d64b25748a5f5c98c7c2ab09825a1d6af0e15/multidict-6.7.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3b29b980d0ddbecb736735ee5bef69bb2ddca56eff603c86f3f29a1128299b4f", size = 266355, upload-time = "2025-10-06T14:50:02.955Z" }, - { url = "https://files.pythonhosted.org/packages/ec/0c/2ec1d883ceb79c6f7f6d7ad90c919c898f5d1c6ea96d322751420211e072/multidict-6.7.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f8a93b1c0ed2d04b97a5e9336fd2d33371b9a6e29ab7dd6503d63407c20ffbaf", size = 267259, upload-time = "2025-10-06T14:50:04.446Z" }, - { url = "https://files.pythonhosted.org/packages/c6/2d/f0b184fa88d6630aa267680bdb8623fb69cb0d024b8c6f0d23f9a0f406d3/multidict-6.7.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ff96e8815eecacc6645da76c413eb3b3d34cfca256c70b16b286a687d013c32", size = 254903, upload-time = "2025-10-06T14:50:05.98Z" }, - { url = "https://files.pythonhosted.org/packages/06/c9/11ea263ad0df7dfabcad404feb3c0dd40b131bc7f232d5537f2fb1356951/multidict-6.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:7516c579652f6a6be0e266aec0acd0db80829ca305c3d771ed898538804c2036", size = 252365, upload-time = "2025-10-06T14:50:07.511Z" }, - { url = "https://files.pythonhosted.org/packages/41/88/d714b86ee2c17d6e09850c70c9d310abac3d808ab49dfa16b43aba9d53fd/multidict-6.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:040f393368e63fb0f3330e70c26bfd336656bed925e5cbe17c9da839a6ab13ec", size = 250062, upload-time = "2025-10-06T14:50:09.074Z" }, - { url = "https://files.pythonhosted.org/packages/15/fe/ad407bb9e818c2b31383f6131ca19ea7e35ce93cf1310fce69f12e89de75/multidict-6.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b3bc26a951007b1057a1c543af845f1c7e3e71cc240ed1ace7bf4484aa99196e", size = 249683, upload-time = "2025-10-06T14:50:10.714Z" }, - { url = "https://files.pythonhosted.org/packages/8c/a4/a89abdb0229e533fb925e7c6e5c40201c2873efebc9abaf14046a4536ee6/multidict-6.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7b022717c748dd1992a83e219587aabe45980d88969f01b316e78683e6285f64", size = 261254, upload-time = "2025-10-06T14:50:12.28Z" }, - { url = "https://files.pythonhosted.org/packages/8d/aa/0e2b27bd88b40a4fb8dc53dd74eecac70edaa4c1dd0707eb2164da3675b3/multidict-6.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:9600082733859f00d79dee64effc7aef1beb26adb297416a4ad2116fd61374bd", size = 257967, upload-time = "2025-10-06T14:50:14.16Z" }, - { url = "https://files.pythonhosted.org/packages/d0/8e/0c67b7120d5d5f6d874ed85a085f9dc770a7f9d8813e80f44a9fec820bb7/multidict-6.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:94218fcec4d72bc61df51c198d098ce2b378e0ccbac41ddbed5ef44092913288", size = 250085, upload-time = "2025-10-06T14:50:15.639Z" }, - { url = "https://files.pythonhosted.org/packages/ba/55/b73e1d624ea4b8fd4dd07a3bb70f6e4c7c6c5d9d640a41c6ffe5cdbd2a55/multidict-6.7.0-cp313-cp313-win32.whl", hash = "sha256:a37bd74c3fa9d00be2d7b8eca074dc56bd8077ddd2917a839bd989612671ed17", size = 41713, upload-time = "2025-10-06T14:50:17.066Z" }, - 
{ url = "https://files.pythonhosted.org/packages/32/31/75c59e7d3b4205075b4c183fa4ca398a2daf2303ddf616b04ae6ef55cffe/multidict-6.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:30d193c6cc6d559db42b6bcec8a5d395d34d60c9877a0b71ecd7c204fcf15390", size = 45915, upload-time = "2025-10-06T14:50:18.264Z" }, - { url = "https://files.pythonhosted.org/packages/31/2a/8987831e811f1184c22bc2e45844934385363ee61c0a2dcfa8f71b87e608/multidict-6.7.0-cp313-cp313-win_arm64.whl", hash = "sha256:ea3334cabe4d41b7ccd01e4d349828678794edbc2d3ae97fc162a3312095092e", size = 43077, upload-time = "2025-10-06T14:50:19.853Z" }, - { url = "https://files.pythonhosted.org/packages/e8/68/7b3a5170a382a340147337b300b9eb25a9ddb573bcdfff19c0fa3f31ffba/multidict-6.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:ad9ce259f50abd98a1ca0aa6e490b58c316a0fce0617f609723e40804add2c00", size = 83114, upload-time = "2025-10-06T14:50:21.223Z" }, - { url = "https://files.pythonhosted.org/packages/55/5c/3fa2d07c84df4e302060f555bbf539310980362236ad49f50eeb0a1c1eb9/multidict-6.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07f5594ac6d084cbb5de2df218d78baf55ef150b91f0ff8a21cc7a2e3a5a58eb", size = 48442, upload-time = "2025-10-06T14:50:22.871Z" }, - { url = "https://files.pythonhosted.org/packages/fc/56/67212d33239797f9bd91962bb899d72bb0f4c35a8652dcdb8ed049bef878/multidict-6.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0591b48acf279821a579282444814a2d8d0af624ae0bc600aa4d1b920b6e924b", size = 46885, upload-time = "2025-10-06T14:50:24.258Z" }, - { url = "https://files.pythonhosted.org/packages/46/d1/908f896224290350721597a61a69cd19b89ad8ee0ae1f38b3f5cd12ea2ac/multidict-6.7.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:749a72584761531d2b9467cfbdfd29487ee21124c304c4b6cb760d8777b27f9c", size = 242588, upload-time = "2025-10-06T14:50:25.716Z" }, - { url = 
"https://files.pythonhosted.org/packages/ab/67/8604288bbd68680eee0ab568fdcb56171d8b23a01bcd5cb0c8fedf6e5d99/multidict-6.7.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b4c3d199f953acd5b446bf7c0de1fe25d94e09e79086f8dc2f48a11a129cdf1", size = 249966, upload-time = "2025-10-06T14:50:28.192Z" }, - { url = "https://files.pythonhosted.org/packages/20/33/9228d76339f1ba51e3efef7da3ebd91964d3006217aae13211653193c3ff/multidict-6.7.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9fb0211dfc3b51efea2f349ec92c114d7754dd62c01f81c3e32b765b70c45c9b", size = 228618, upload-time = "2025-10-06T14:50:29.82Z" }, - { url = "https://files.pythonhosted.org/packages/f8/2d/25d9b566d10cab1c42b3b9e5b11ef79c9111eaf4463b8c257a3bd89e0ead/multidict-6.7.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a027ec240fe73a8d6281872690b988eed307cd7d91b23998ff35ff577ca688b5", size = 257539, upload-time = "2025-10-06T14:50:31.731Z" }, - { url = "https://files.pythonhosted.org/packages/b6/b1/8d1a965e6637fc33de3c0d8f414485c2b7e4af00f42cab3d84e7b955c222/multidict-6.7.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1d964afecdf3a8288789df2f5751dc0a8261138c3768d9af117ed384e538fad", size = 256345, upload-time = "2025-10-06T14:50:33.26Z" }, - { url = "https://files.pythonhosted.org/packages/ba/0c/06b5a8adbdeedada6f4fb8d8f193d44a347223b11939b42953eeb6530b6b/multidict-6.7.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:caf53b15b1b7df9fbd0709aa01409000a2b4dd03a5f6f5cc548183c7c8f8b63c", size = 247934, upload-time = "2025-10-06T14:50:34.808Z" }, - { url = "https://files.pythonhosted.org/packages/8f/31/b2491b5fe167ca044c6eb4b8f2c9f3b8a00b24c432c365358eadac5d7625/multidict-6.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = 
"sha256:654030da3197d927f05a536a66186070e98765aa5142794c9904555d3a9d8fb5", size = 245243, upload-time = "2025-10-06T14:50:36.436Z" }, - { url = "https://files.pythonhosted.org/packages/61/1a/982913957cb90406c8c94f53001abd9eafc271cb3e70ff6371590bec478e/multidict-6.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:2090d3718829d1e484706a2f525e50c892237b2bf9b17a79b059cb98cddc2f10", size = 235878, upload-time = "2025-10-06T14:50:37.953Z" }, - { url = "https://files.pythonhosted.org/packages/be/c0/21435d804c1a1cf7a2608593f4d19bca5bcbd7a81a70b253fdd1c12af9c0/multidict-6.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2d2cfeec3f6f45651b3d408c4acec0ebf3daa9bc8a112a084206f5db5d05b754", size = 243452, upload-time = "2025-10-06T14:50:39.574Z" }, - { url = "https://files.pythonhosted.org/packages/54/0a/4349d540d4a883863191be6eb9a928846d4ec0ea007d3dcd36323bb058ac/multidict-6.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:4ef089f985b8c194d341eb2c24ae6e7408c9a0e2e5658699c92f497437d88c3c", size = 252312, upload-time = "2025-10-06T14:50:41.612Z" }, - { url = "https://files.pythonhosted.org/packages/26/64/d5416038dbda1488daf16b676e4dbfd9674dde10a0cc8f4fc2b502d8125d/multidict-6.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e93a0617cd16998784bf4414c7e40f17a35d2350e5c6f0bd900d3a8e02bd3762", size = 246935, upload-time = "2025-10-06T14:50:43.972Z" }, - { url = "https://files.pythonhosted.org/packages/9f/8c/8290c50d14e49f35e0bd4abc25e1bc7711149ca9588ab7d04f886cdf03d9/multidict-6.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f0feece2ef8ebc42ed9e2e8c78fc4aa3cf455733b507c09ef7406364c94376c6", size = 243385, upload-time = "2025-10-06T14:50:45.648Z" }, - { url = "https://files.pythonhosted.org/packages/ef/a0/f83ae75e42d694b3fbad3e047670e511c138be747bc713cf1b10d5096416/multidict-6.7.0-cp313-cp313t-win32.whl", hash = "sha256:19a1d55338ec1be74ef62440ca9e04a2f001a04d0cc49a4983dc320ff0f3212d", size = 47777, upload-time = 
"2025-10-06T14:50:47.154Z" }, - { url = "https://files.pythonhosted.org/packages/dc/80/9b174a92814a3830b7357307a792300f42c9e94664b01dee8e457551fa66/multidict-6.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3da4fb467498df97e986af166b12d01f05d2e04f978a9c1c680ea1988e0bc4b6", size = 53104, upload-time = "2025-10-06T14:50:48.851Z" }, - { url = "https://files.pythonhosted.org/packages/cc/28/04baeaf0428d95bb7a7bea0e691ba2f31394338ba424fb0679a9ed0f4c09/multidict-6.7.0-cp313-cp313t-win_arm64.whl", hash = "sha256:b4121773c49a0776461f4a904cdf6264c88e42218aaa8407e803ca8025872792", size = 45503, upload-time = "2025-10-06T14:50:50.16Z" }, - { url = "https://files.pythonhosted.org/packages/e2/b1/3da6934455dd4b261d4c72f897e3a5728eba81db59959f3a639245891baa/multidict-6.7.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3bab1e4aff7adaa34410f93b1f8e57c4b36b9af0426a76003f441ee1d3c7e842", size = 75128, upload-time = "2025-10-06T14:50:51.92Z" }, - { url = "https://files.pythonhosted.org/packages/14/2c/f069cab5b51d175a1a2cb4ccdf7a2c2dabd58aa5bd933fa036a8d15e2404/multidict-6.7.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b8512bac933afc3e45fb2b18da8e59b78d4f408399a960339598374d4ae3b56b", size = 44410, upload-time = "2025-10-06T14:50:53.275Z" }, - { url = "https://files.pythonhosted.org/packages/42/e2/64bb41266427af6642b6b128e8774ed84c11b80a90702c13ac0a86bb10cc/multidict-6.7.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:79dcf9e477bc65414ebfea98ffd013cb39552b5ecd62908752e0e413d6d06e38", size = 43205, upload-time = "2025-10-06T14:50:54.911Z" }, - { url = "https://files.pythonhosted.org/packages/02/68/6b086fef8a3f1a8541b9236c594f0c9245617c29841f2e0395d979485cde/multidict-6.7.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:31bae522710064b5cbeddaf2e9f32b1abab70ac6ac91d42572502299e9953128", size = 245084, upload-time = "2025-10-06T14:50:56.369Z" }, - { url = 
"https://files.pythonhosted.org/packages/15/ee/f524093232007cd7a75c1d132df70f235cfd590a7c9eaccd7ff422ef4ae8/multidict-6.7.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4a0df7ff02397bb63e2fd22af2c87dfa39e8c7f12947bc524dbdc528282c7e34", size = 252667, upload-time = "2025-10-06T14:50:57.991Z" }, - { url = "https://files.pythonhosted.org/packages/02/a5/eeb3f43ab45878f1895118c3ef157a480db58ede3f248e29b5354139c2c9/multidict-6.7.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7a0222514e8e4c514660e182d5156a415c13ef0aabbd71682fc714e327b95e99", size = 233590, upload-time = "2025-10-06T14:50:59.589Z" }, - { url = "https://files.pythonhosted.org/packages/6a/1e/76d02f8270b97269d7e3dbd45644b1785bda457b474315f8cf999525a193/multidict-6.7.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2397ab4daaf2698eb51a76721e98db21ce4f52339e535725de03ea962b5a3202", size = 264112, upload-time = "2025-10-06T14:51:01.183Z" }, - { url = "https://files.pythonhosted.org/packages/76/0b/c28a70ecb58963847c2a8efe334904cd254812b10e535aefb3bcce513918/multidict-6.7.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8891681594162635948a636c9fe0ff21746aeb3dd5463f6e25d9bea3a8a39ca1", size = 261194, upload-time = "2025-10-06T14:51:02.794Z" }, - { url = "https://files.pythonhosted.org/packages/b4/63/2ab26e4209773223159b83aa32721b4021ffb08102f8ac7d689c943fded1/multidict-6.7.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18706cc31dbf402a7945916dd5cddf160251b6dab8a2c5f3d6d5a55949f676b3", size = 248510, upload-time = "2025-10-06T14:51:04.724Z" }, - { url = "https://files.pythonhosted.org/packages/93/cd/06c1fa8282af1d1c46fd55c10a7930af652afdce43999501d4d68664170c/multidict-6.7.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = 
"sha256:f844a1bbf1d207dd311a56f383f7eda2d0e134921d45751842d8235e7778965d", size = 248395, upload-time = "2025-10-06T14:51:06.306Z" }, - { url = "https://files.pythonhosted.org/packages/99/ac/82cb419dd6b04ccf9e7e61befc00c77614fc8134362488b553402ecd55ce/multidict-6.7.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:d4393e3581e84e5645506923816b9cc81f5609a778c7e7534054091acc64d1c6", size = 239520, upload-time = "2025-10-06T14:51:08.091Z" }, - { url = "https://files.pythonhosted.org/packages/fa/f3/a0f9bf09493421bd8716a362e0cd1d244f5a6550f5beffdd6b47e885b331/multidict-6.7.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:fbd18dc82d7bf274b37aa48d664534330af744e03bccf696d6f4c6042e7d19e7", size = 245479, upload-time = "2025-10-06T14:51:10.365Z" }, - { url = "https://files.pythonhosted.org/packages/8d/01/476d38fc73a212843f43c852b0eee266b6971f0e28329c2184a8df90c376/multidict-6.7.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:b6234e14f9314731ec45c42fc4554b88133ad53a09092cc48a88e771c125dadb", size = 258903, upload-time = "2025-10-06T14:51:12.466Z" }, - { url = "https://files.pythonhosted.org/packages/49/6d/23faeb0868adba613b817d0e69c5f15531b24d462af8012c4f6de4fa8dc3/multidict-6.7.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:08d4379f9744d8f78d98c8673c06e202ffa88296f009c71bbafe8a6bf847d01f", size = 252333, upload-time = "2025-10-06T14:51:14.48Z" }, - { url = "https://files.pythonhosted.org/packages/1e/cc/48d02ac22b30fa247f7dad82866e4b1015431092f4ba6ebc7e77596e0b18/multidict-6.7.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9fe04da3f79387f450fd0061d4dd2e45a72749d31bf634aecc9e27f24fdc4b3f", size = 243411, upload-time = "2025-10-06T14:51:16.072Z" }, - { url = "https://files.pythonhosted.org/packages/4a/03/29a8bf5a18abf1fe34535c88adbdfa88c9fb869b5a3b120692c64abe8284/multidict-6.7.0-cp314-cp314-win32.whl", hash = "sha256:fbafe31d191dfa7c4c51f7a6149c9fb7e914dcf9ffead27dcfd9f1ae382b3885", size = 40940, upload-time = "2025-10-06T14:51:17.544Z" }, 
- { url = "https://files.pythonhosted.org/packages/82/16/7ed27b680791b939de138f906d5cf2b4657b0d45ca6f5dd6236fdddafb1a/multidict-6.7.0-cp314-cp314-win_amd64.whl", hash = "sha256:2f67396ec0310764b9222a1728ced1ab638f61aadc6226f17a71dd9324f9a99c", size = 45087, upload-time = "2025-10-06T14:51:18.875Z" }, - { url = "https://files.pythonhosted.org/packages/cd/3c/e3e62eb35a1950292fe39315d3c89941e30a9d07d5d2df42965ab041da43/multidict-6.7.0-cp314-cp314-win_arm64.whl", hash = "sha256:ba672b26069957ee369cfa7fc180dde1fc6f176eaf1e6beaf61fbebbd3d9c000", size = 42368, upload-time = "2025-10-06T14:51:20.225Z" }, - { url = "https://files.pythonhosted.org/packages/8b/40/cd499bd0dbc5f1136726db3153042a735fffd0d77268e2ee20d5f33c010f/multidict-6.7.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:c1dcc7524066fa918c6a27d61444d4ee7900ec635779058571f70d042d86ed63", size = 82326, upload-time = "2025-10-06T14:51:21.588Z" }, - { url = "https://files.pythonhosted.org/packages/13/8a/18e031eca251c8df76daf0288e6790561806e439f5ce99a170b4af30676b/multidict-6.7.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:27e0b36c2d388dc7b6ced3406671b401e84ad7eb0656b8f3a2f46ed0ce483718", size = 48065, upload-time = "2025-10-06T14:51:22.93Z" }, - { url = "https://files.pythonhosted.org/packages/40/71/5e6701277470a87d234e433fb0a3a7deaf3bcd92566e421e7ae9776319de/multidict-6.7.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2a7baa46a22e77f0988e3b23d4ede5513ebec1929e34ee9495be535662c0dfe2", size = 46475, upload-time = "2025-10-06T14:51:24.352Z" }, - { url = "https://files.pythonhosted.org/packages/fe/6a/bab00cbab6d9cfb57afe1663318f72ec28289ea03fd4e8236bb78429893a/multidict-6.7.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7bf77f54997a9166a2f5675d1201520586439424c2511723a7312bdb4bcc034e", size = 239324, upload-time = "2025-10-06T14:51:25.822Z" }, - { url = 
"https://files.pythonhosted.org/packages/2a/5f/8de95f629fc22a7769ade8b41028e3e5a822c1f8904f618d175945a81ad3/multidict-6.7.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e011555abada53f1578d63389610ac8a5400fc70ce71156b0aa30d326f1a5064", size = 246877, upload-time = "2025-10-06T14:51:27.604Z" }, - { url = "https://files.pythonhosted.org/packages/23/b4/38881a960458f25b89e9f4a4fdcb02ac101cfa710190db6e5528841e67de/multidict-6.7.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:28b37063541b897fd6a318007373930a75ca6d6ac7c940dbe14731ffdd8d498e", size = 225824, upload-time = "2025-10-06T14:51:29.664Z" }, - { url = "https://files.pythonhosted.org/packages/1e/39/6566210c83f8a261575f18e7144736059f0c460b362e96e9cf797a24b8e7/multidict-6.7.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:05047ada7a2fde2631a0ed706f1fd68b169a681dfe5e4cf0f8e4cb6618bbc2cd", size = 253558, upload-time = "2025-10-06T14:51:31.684Z" }, - { url = "https://files.pythonhosted.org/packages/00/a3/67f18315100f64c269f46e6c0319fa87ba68f0f64f2b8e7fd7c72b913a0b/multidict-6.7.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:716133f7d1d946a4e1b91b1756b23c088881e70ff180c24e864c26192ad7534a", size = 252339, upload-time = "2025-10-06T14:51:33.699Z" }, - { url = "https://files.pythonhosted.org/packages/c8/2a/1cb77266afee2458d82f50da41beba02159b1d6b1f7973afc9a1cad1499b/multidict-6.7.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d1bed1b467ef657f2a0ae62844a607909ef1c6889562de5e1d505f74457d0b96", size = 244895, upload-time = "2025-10-06T14:51:36.189Z" }, - { url = "https://files.pythonhosted.org/packages/dd/72/09fa7dd487f119b2eb9524946ddd36e2067c08510576d43ff68469563b3b/multidict-6.7.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = 
"sha256:ca43bdfa5d37bd6aee89d85e1d0831fb86e25541be7e9d376ead1b28974f8e5e", size = 241862, upload-time = "2025-10-06T14:51:41.291Z" }, - { url = "https://files.pythonhosted.org/packages/65/92/bc1f8bd0853d8669300f732c801974dfc3702c3eeadae2f60cef54dc69d7/multidict-6.7.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:44b546bd3eb645fd26fb949e43c02a25a2e632e2ca21a35e2e132c8105dc8599", size = 232376, upload-time = "2025-10-06T14:51:43.55Z" }, - { url = "https://files.pythonhosted.org/packages/09/86/ac39399e5cb9d0c2ac8ef6e10a768e4d3bc933ac808d49c41f9dc23337eb/multidict-6.7.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a6ef16328011d3f468e7ebc326f24c1445f001ca1dec335b2f8e66bed3006394", size = 240272, upload-time = "2025-10-06T14:51:45.265Z" }, - { url = "https://files.pythonhosted.org/packages/3d/b6/fed5ac6b8563ec72df6cb1ea8dac6d17f0a4a1f65045f66b6d3bf1497c02/multidict-6.7.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:5aa873cbc8e593d361ae65c68f85faadd755c3295ea2c12040ee146802f23b38", size = 248774, upload-time = "2025-10-06T14:51:46.836Z" }, - { url = "https://files.pythonhosted.org/packages/6b/8d/b954d8c0dc132b68f760aefd45870978deec6818897389dace00fcde32ff/multidict-6.7.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:3d7b6ccce016e29df4b7ca819659f516f0bc7a4b3efa3bb2012ba06431b044f9", size = 242731, upload-time = "2025-10-06T14:51:48.541Z" }, - { url = "https://files.pythonhosted.org/packages/16/9d/a2dac7009125d3540c2f54e194829ea18ac53716c61b655d8ed300120b0f/multidict-6.7.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:171b73bd4ee683d307599b66793ac80981b06f069b62eea1c9e29c9241aa66b0", size = 240193, upload-time = "2025-10-06T14:51:50.355Z" }, - { url = "https://files.pythonhosted.org/packages/39/ca/c05f144128ea232ae2178b008d5011d4e2cea86e4ee8c85c2631b1b94802/multidict-6.7.0-cp314-cp314t-win32.whl", hash = "sha256:b2d7f80c4e1fd010b07cb26820aae86b7e73b681ee4889684fb8d2d4537aab13", size = 48023, upload-time = 
"2025-10-06T14:51:51.883Z" }, - { url = "https://files.pythonhosted.org/packages/ba/8f/0a60e501584145588be1af5cc829265701ba3c35a64aec8e07cbb71d39bb/multidict-6.7.0-cp314-cp314t-win_amd64.whl", hash = "sha256:09929cab6fcb68122776d575e03c6cc64ee0b8fca48d17e135474b042ce515cd", size = 53507, upload-time = "2025-10-06T14:51:53.672Z" }, - { url = "https://files.pythonhosted.org/packages/7f/ae/3148b988a9c6239903e786eac19c889fab607c31d6efa7fb2147e5680f23/multidict-6.7.0-cp314-cp314t-win_arm64.whl", hash = "sha256:cc41db090ed742f32bd2d2c721861725e6109681eddf835d0a82bd3a5c382827", size = 44804, upload-time = "2025-10-06T14:51:55.415Z" }, - { url = "https://files.pythonhosted.org/packages/b7/da/7d22601b625e241d4f23ef1ebff8acfc60da633c9e7e7922e24d10f592b3/multidict-6.7.0-py3-none-any.whl", hash = "sha256:394fc5c42a333c9ffc3e421a4c85e08580d990e08b99f6bf35b4132114c5dcb3", size = 12317, upload-time = "2025-10-06T14:52:29.272Z" }, -] - -[[package]] -name = "mypy" -version = "1.19.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "librt" }, - { name = "mypy-extensions" }, - { name = "pathspec" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/f9/b5/b58cdc25fadd424552804bf410855d52324183112aa004f0732c5f6324cf/mypy-1.19.0.tar.gz", hash = "sha256:f6b874ca77f733222641e5c46e4711648c4037ea13646fd0cdc814c2eaec2528", size = 3579025, upload-time = "2025-11-28T15:49:01.26Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/0f/d2/010fb171ae5ac4a01cc34fbacd7544531e5ace95c35ca166dd8fd1b901d0/mypy-1.19.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a31e4c28e8ddb042c84c5e977e28a21195d086aaffaf08b016b78e19c9ef8106", size = 13010563, upload-time = "2025-11-28T15:48:23.975Z" }, - { url = "https://files.pythonhosted.org/packages/41/6b/63f095c9f1ce584fdeb595d663d49e0980c735a1d2004720ccec252c5d47/mypy-1.19.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:34ec1ac66d31644f194b7c163d7f8b8434f1b49719d403a5d26c87fff7e913f7", size = 12077037, upload-time = "2025-11-28T15:47:51.582Z" }, - { url = "https://files.pythonhosted.org/packages/d7/83/6cb93d289038d809023ec20eb0b48bbb1d80af40511fa077da78af6ff7c7/mypy-1.19.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cb64b0ba5980466a0f3f9990d1c582bcab8db12e29815ecb57f1408d99b4bff7", size = 12680255, upload-time = "2025-11-28T15:46:57.628Z" }, - { url = "https://files.pythonhosted.org/packages/99/db/d217815705987d2cbace2edd9100926196d6f85bcb9b5af05058d6e3c8ad/mypy-1.19.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:120cffe120cca5c23c03c77f84abc0c14c5d2e03736f6c312480020082f1994b", size = 13421472, upload-time = "2025-11-28T15:47:59.655Z" }, - { url = "https://files.pythonhosted.org/packages/4e/51/d2beaca7c497944b07594f3f8aad8d2f0e8fc53677059848ae5d6f4d193e/mypy-1.19.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7a500ab5c444268a70565e374fc803972bfd1f09545b13418a5174e29883dab7", size = 13651823, upload-time = "2025-11-28T15:45:29.318Z" }, - { url = "https://files.pythonhosted.org/packages/aa/d1/7883dcf7644db3b69490f37b51029e0870aac4a7ad34d09ceae709a3df44/mypy-1.19.0-cp311-cp311-win_amd64.whl", hash = "sha256:c14a98bc63fd867530e8ec82f217dae29d0550c86e70debc9667fff1ec83284e", size = 10049077, upload-time = "2025-11-28T15:45:39.818Z" }, - { url = "https://files.pythonhosted.org/packages/11/7e/1afa8fb188b876abeaa14460dc4983f909aaacaa4bf5718c00b2c7e0b3d5/mypy-1.19.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0fb3115cb8fa7c5f887c8a8d81ccdcb94cff334684980d847e5a62e926910e1d", size = 13207728, upload-time = "2025-11-28T15:46:26.463Z" }, - { url = "https://files.pythonhosted.org/packages/b2/13/f103d04962bcbefb1644f5ccb235998b32c337d6c13145ea390b9da47f3e/mypy-1.19.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:f3e19e3b897562276bb331074d64c076dbdd3e79213f36eed4e592272dabd760", size = 12202945, upload-time = "2025-11-28T15:48:49.143Z" }, - { url = "https://files.pythonhosted.org/packages/e4/93/a86a5608f74a22284a8ccea8592f6e270b61f95b8588951110ad797c2ddd/mypy-1.19.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b9d491295825182fba01b6ffe2c6fe4e5a49dbf4e2bb4d1217b6ced3b4797bc6", size = 12718673, upload-time = "2025-11-28T15:47:37.193Z" }, - { url = "https://files.pythonhosted.org/packages/3d/58/cf08fff9ced0423b858f2a7495001fda28dc058136818ee9dffc31534ea9/mypy-1.19.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6016c52ab209919b46169651b362068f632efcd5eb8ef9d1735f6f86da7853b2", size = 13608336, upload-time = "2025-11-28T15:48:32.625Z" }, - { url = "https://files.pythonhosted.org/packages/64/ed/9c509105c5a6d4b73bb08733102a3ea62c25bc02c51bca85e3134bf912d3/mypy-1.19.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f188dcf16483b3e59f9278c4ed939ec0254aa8a60e8fc100648d9ab5ee95a431", size = 13833174, upload-time = "2025-11-28T15:45:48.091Z" }, - { url = "https://files.pythonhosted.org/packages/cd/71/01939b66e35c6f8cb3e6fdf0b657f0fd24de2f8ba5e523625c8e72328208/mypy-1.19.0-cp312-cp312-win_amd64.whl", hash = "sha256:0e3c3d1e1d62e678c339e7ade72746a9e0325de42cd2cccc51616c7b2ed1a018", size = 10112208, upload-time = "2025-11-28T15:46:41.702Z" }, - { url = "https://files.pythonhosted.org/packages/cb/0d/a1357e6bb49e37ce26fcf7e3cc55679ce9f4ebee0cd8b6ee3a0e301a9210/mypy-1.19.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7686ed65dbabd24d20066f3115018d2dce030d8fa9db01aa9f0a59b6813e9f9e", size = 13191993, upload-time = "2025-11-28T15:47:22.336Z" }, - { url = "https://files.pythonhosted.org/packages/5d/75/8e5d492a879ec4490e6ba664b5154e48c46c85b5ac9785792a5ec6a4d58f/mypy-1.19.0-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:fd4a985b2e32f23bead72e2fb4bbe5d6aceee176be471243bd831d5b2644672d", size = 12174411, upload-time = "2025-11-28T15:44:55.492Z" }, - { url = "https://files.pythonhosted.org/packages/71/31/ad5dcee9bfe226e8eaba777e9d9d251c292650130f0450a280aec3485370/mypy-1.19.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fc51a5b864f73a3a182584b1ac75c404396a17eced54341629d8bdcb644a5bba", size = 12727751, upload-time = "2025-11-28T15:44:14.169Z" }, - { url = "https://files.pythonhosted.org/packages/77/06/b6b8994ce07405f6039701f4b66e9d23f499d0b41c6dd46ec28f96d57ec3/mypy-1.19.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:37af5166f9475872034b56c5efdcf65ee25394e9e1d172907b84577120714364", size = 13593323, upload-time = "2025-11-28T15:46:34.699Z" }, - { url = "https://files.pythonhosted.org/packages/68/b1/126e274484cccdf099a8e328d4fda1c7bdb98a5e888fa6010b00e1bbf330/mypy-1.19.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:510c014b722308c9bd377993bcbf9a07d7e0692e5fa8fc70e639c1eb19fc6bee", size = 13818032, upload-time = "2025-11-28T15:46:18.286Z" }, - { url = "https://files.pythonhosted.org/packages/f8/56/53a8f70f562dfc466c766469133a8a4909f6c0012d83993143f2a9d48d2d/mypy-1.19.0-cp313-cp313-win_amd64.whl", hash = "sha256:cabbee74f29aa9cd3b444ec2f1e4fa5a9d0d746ce7567a6a609e224429781f53", size = 10120644, upload-time = "2025-11-28T15:47:43.99Z" }, - { url = "https://files.pythonhosted.org/packages/b0/f4/7751f32f56916f7f8c229fe902cbdba3e4dd3f3ea9e8b872be97e7fc546d/mypy-1.19.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:f2e36bed3c6d9b5f35d28b63ca4b727cb0228e480826ffc8953d1892ddc8999d", size = 13185236, upload-time = "2025-11-28T15:45:20.696Z" }, - { url = "https://files.pythonhosted.org/packages/35/31/871a9531f09e78e8d145032355890384f8a5b38c95a2c7732d226b93242e/mypy-1.19.0-cp314-cp314-macosx_11_0_arm64.whl", hash = 
"sha256:a18d8abdda14035c5718acb748faec09571432811af129bf0d9e7b2d6699bf18", size = 12213902, upload-time = "2025-11-28T15:46:10.117Z" }, - { url = "https://files.pythonhosted.org/packages/58/b8/af221910dd40eeefa2077a59107e611550167b9994693fc5926a0b0f87c0/mypy-1.19.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f75e60aca3723a23511948539b0d7ed514dda194bc3755eae0bfc7a6b4887aa7", size = 12738600, upload-time = "2025-11-28T15:44:22.521Z" }, - { url = "https://files.pythonhosted.org/packages/11/9f/c39e89a3e319c1d9c734dedec1183b2cc3aefbab066ec611619002abb932/mypy-1.19.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f44f2ae3c58421ee05fe609160343c25f70e3967f6e32792b5a78006a9d850f", size = 13592639, upload-time = "2025-11-28T15:48:08.55Z" }, - { url = "https://files.pythonhosted.org/packages/97/6d/ffaf5f01f5e284d9033de1267e6c1b8f3783f2cf784465378a86122e884b/mypy-1.19.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:63ea6a00e4bd6822adbfc75b02ab3653a17c02c4347f5bb0cf1d5b9df3a05835", size = 13799132, upload-time = "2025-11-28T15:47:06.032Z" }, - { url = "https://files.pythonhosted.org/packages/fe/b0/c33921e73aaa0106224e5a34822411bea38046188eb781637f5a5b07e269/mypy-1.19.0-cp314-cp314-win_amd64.whl", hash = "sha256:3ad925b14a0bb99821ff6f734553294aa6a3440a8cb082fe1f5b84dfb662afb1", size = 10269832, upload-time = "2025-11-28T15:47:29.392Z" }, - { url = "https://files.pythonhosted.org/packages/09/0e/fe228ed5aeab470c6f4eb82481837fadb642a5aa95cc8215fd2214822c10/mypy-1.19.0-py3-none-any.whl", hash = "sha256:0c01c99d626380752e527d5ce8e69ffbba2046eb8a060db0329690849cf9b6f9", size = 2469714, upload-time = "2025-11-28T15:45:33.22Z" }, -] - -[[package]] -name = "mypy-extensions" -version = "1.1.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +version = "6.7.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1a/c2/c2d94cbe6ac1753f3fc980da97b3d930efe1da3af3c9f5125354436c073d/multidict-6.7.1.tar.gz", hash = "sha256:ec6652a1bee61c53a3e5776b6049172c53b6aaba34f18c9ad04f82712bac623d", size = 102010, upload-time = "2026-01-26T02:46:45.979Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/f1/a90635c4f88fb913fbf4ce660b83b7445b7a02615bda034b2f8eb38fd597/multidict-6.7.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7ff981b266af91d7b4b3793ca3382e53229088d193a85dfad6f5f4c27fc73e5d", size = 76626, upload-time = "2026-01-26T02:43:26.485Z" }, + { url = "https://files.pythonhosted.org/packages/a6/9b/267e64eaf6fc637a15b35f5de31a566634a2740f97d8d094a69d34f524a4/multidict-6.7.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:844c5bca0b5444adb44a623fb0a1310c2f4cd41f402126bb269cd44c9b3f3e1e", size = 44706, upload-time = "2026-01-26T02:43:27.607Z" }, + { url = "https://files.pythonhosted.org/packages/dd/a4/d45caf2b97b035c57267791ecfaafbd59c68212004b3842830954bb4b02e/multidict-6.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f2a0a924d4c2e9afcd7ec64f9de35fcd96915149b2216e1cb2c10a56df483855", size = 44356, upload-time = "2026-01-26T02:43:28.661Z" }, + { url = 
"https://files.pythonhosted.org/packages/fd/d2/0a36c8473f0cbaeadd5db6c8b72d15bbceeec275807772bfcd059bef487d/multidict-6.7.1-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:8be1802715a8e892c784c0197c2ace276ea52702a0ede98b6310c8f255a5afb3", size = 244355, upload-time = "2026-01-26T02:43:31.165Z" }, + { url = "https://files.pythonhosted.org/packages/5d/16/8c65be997fd7dd311b7d39c7b6e71a0cb449bad093761481eccbbe4b42a2/multidict-6.7.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2e2d2ed645ea29f31c4c7ea1552fcfd7cb7ba656e1eafd4134a6620c9f5fdd9e", size = 246433, upload-time = "2026-01-26T02:43:32.581Z" }, + { url = "https://files.pythonhosted.org/packages/01/fb/4dbd7e848d2799c6a026ec88ad39cf2b8416aa167fcc903baa55ecaa045c/multidict-6.7.1-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:95922cee9a778659e91db6497596435777bd25ed116701a4c034f8e46544955a", size = 225376, upload-time = "2026-01-26T02:43:34.417Z" }, + { url = "https://files.pythonhosted.org/packages/b6/8a/4a3a6341eac3830f6053062f8fbc9a9e54407c80755b3f05bc427295c2d0/multidict-6.7.1-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6b83cabdc375ffaaa15edd97eb7c0c672ad788e2687004990074d7d6c9b140c8", size = 257365, upload-time = "2026-01-26T02:43:35.741Z" }, + { url = "https://files.pythonhosted.org/packages/f7/a2/dd575a69c1aa206e12d27d0770cdf9b92434b48a9ef0cd0d1afdecaa93c4/multidict-6.7.1-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:38fb49540705369bab8484db0689d86c0a33a0a9f2c1b197f506b71b4b6c19b0", size = 254747, upload-time = "2026-01-26T02:43:36.976Z" }, + { url = "https://files.pythonhosted.org/packages/5a/56/21b27c560c13822ed93133f08aa6372c53a8e067f11fbed37b4adcdac922/multidict-6.7.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:439cbebd499f92e9aa6793016a8acaa161dfa749ae86d20960189f5398a19144", size = 246293, upload-time = "2026-01-26T02:43:38.258Z" }, + { url = "https://files.pythonhosted.org/packages/5a/a4/23466059dc3854763423d0ad6c0f3683a379d97673b1b89ec33826e46728/multidict-6.7.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6d3bc717b6fe763b8be3f2bee2701d3c8eb1b2a8ae9f60910f1b2860c82b6c49", size = 242962, upload-time = "2026-01-26T02:43:40.034Z" }, + { url = "https://files.pythonhosted.org/packages/1f/67/51dd754a3524d685958001e8fa20a0f5f90a6a856e0a9dcabff69be3dbb7/multidict-6.7.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:619e5a1ac57986dbfec9f0b301d865dddf763696435e2962f6d9cf2fdff2bb71", size = 237360, upload-time = "2026-01-26T02:43:41.752Z" }, + { url = "https://files.pythonhosted.org/packages/64/3f/036dfc8c174934d4b55d86ff4f978e558b0e585cef70cfc1ad01adc6bf18/multidict-6.7.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0b38ebffd9be37c1170d33bc0f36f4f262e0a09bc1aac1c34c7aa51a7293f0b3", size = 245940, upload-time = "2026-01-26T02:43:43.042Z" }, + { url = "https://files.pythonhosted.org/packages/3d/20/6214d3c105928ebc353a1c644a6ef1408bc5794fcb4f170bb524a3c16311/multidict-6.7.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:10ae39c9cfe6adedcdb764f5e8411d4a92b055e35573a2eaa88d3323289ef93c", size = 253502, upload-time = "2026-01-26T02:43:44.371Z" }, + { url = "https://files.pythonhosted.org/packages/b1/e2/c653bc4ae1be70a0f836b82172d643fcf1dade042ba2676ab08ec08bff0f/multidict-6.7.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:25167cc263257660290fba06b9318d2026e3c910be240a146e1f66dd114af2b0", size = 247065, upload-time = "2026-01-26T02:43:45.745Z" }, + { url = "https://files.pythonhosted.org/packages/c8/11/a854b4154cd3bd8b1fd375e8a8ca9d73be37610c361543d56f764109509b/multidict-6.7.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:128441d052254f42989ef98b7b6a6ecb1e6f708aa962c7984235316db59f50fa", size = 241870, upload-time = 
"2026-01-26T02:43:47.054Z" }, + { url = "https://files.pythonhosted.org/packages/13/bf/9676c0392309b5fdae322333d22a829715b570edb9baa8016a517b55b558/multidict-6.7.1-cp311-cp311-win32.whl", hash = "sha256:d62b7f64ffde3b99d06b707a280db04fb3855b55f5a06df387236051d0668f4a", size = 41302, upload-time = "2026-01-26T02:43:48.753Z" }, + { url = "https://files.pythonhosted.org/packages/c9/68/f16a3a8ba6f7b6dc92a1f19669c0810bd2c43fc5a02da13b1cbf8e253845/multidict-6.7.1-cp311-cp311-win_amd64.whl", hash = "sha256:bdbf9f3b332abd0cdb306e7c2113818ab1e922dc84b8f8fd06ec89ed2a19ab8b", size = 45981, upload-time = "2026-01-26T02:43:49.921Z" }, + { url = "https://files.pythonhosted.org/packages/ac/ad/9dd5305253fa00cd3c7555dbef69d5bf4133debc53b87ab8d6a44d411665/multidict-6.7.1-cp311-cp311-win_arm64.whl", hash = "sha256:b8c990b037d2fff2f4e33d3f21b9b531c5745b33a49a7d6dbe7a177266af44f6", size = 43159, upload-time = "2026-01-26T02:43:51.635Z" }, + { url = "https://files.pythonhosted.org/packages/8d/9c/f20e0e2cf80e4b2e4b1c365bf5fe104ee633c751a724246262db8f1a0b13/multidict-6.7.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:a90f75c956e32891a4eda3639ce6dd86e87105271f43d43442a3aedf3cddf172", size = 76893, upload-time = "2026-01-26T02:43:52.754Z" }, + { url = "https://files.pythonhosted.org/packages/fe/cf/18ef143a81610136d3da8193da9d80bfe1cb548a1e2d1c775f26b23d024a/multidict-6.7.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fccb473e87eaa1382689053e4a4618e7ba7b9b9b8d6adf2027ee474597128cd", size = 45456, upload-time = "2026-01-26T02:43:53.893Z" }, + { url = "https://files.pythonhosted.org/packages/a9/65/1caac9d4cd32e8433908683446eebc953e82d22b03d10d41a5f0fefe991b/multidict-6.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0fa96985700739c4c7853a43c0b3e169360d6855780021bfc6d0f1ce7c123e7", size = 43872, upload-time = "2026-01-26T02:43:55.041Z" }, + { url = 
"https://files.pythonhosted.org/packages/cf/3b/d6bd75dc4f3ff7c73766e04e705b00ed6dbbaccf670d9e05a12b006f5a21/multidict-6.7.1-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cb2a55f408c3043e42b40cc8eecd575afa27b7e0b956dfb190de0f8499a57a53", size = 251018, upload-time = "2026-01-26T02:43:56.198Z" }, + { url = "https://files.pythonhosted.org/packages/fd/80/c959c5933adedb9ac15152e4067c702a808ea183a8b64cf8f31af8ad3155/multidict-6.7.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eb0ce7b2a32d09892b3dd6cc44877a0d02a33241fafca5f25c8b6b62374f8b75", size = 258883, upload-time = "2026-01-26T02:43:57.499Z" }, + { url = "https://files.pythonhosted.org/packages/86/85/7ed40adafea3d4f1c8b916e3b5cc3a8e07dfcdcb9cd72800f4ed3ca1b387/multidict-6.7.1-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c3a32d23520ee37bf327d1e1a656fec76a2edd5c038bf43eddfa0572ec49c60b", size = 242413, upload-time = "2026-01-26T02:43:58.755Z" }, + { url = "https://files.pythonhosted.org/packages/d2/57/b8565ff533e48595503c785f8361ff9a4fde4d67de25c207cd0ba3befd03/multidict-6.7.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9c90fed18bffc0189ba814749fdcc102b536e83a9f738a9003e569acd540a733", size = 268404, upload-time = "2026-01-26T02:44:00.216Z" }, + { url = "https://files.pythonhosted.org/packages/e0/50/9810c5c29350f7258180dfdcb2e52783a0632862eb334c4896ac717cebcb/multidict-6.7.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:da62917e6076f512daccfbbde27f46fed1c98fee202f0559adec8ee0de67f71a", size = 269456, upload-time = "2026-01-26T02:44:02.202Z" }, + { url = "https://files.pythonhosted.org/packages/f3/8d/5e5be3ced1d12966fefb5c4ea3b2a5b480afcea36406559442c6e31d4a48/multidict-6.7.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:bfde23ef6ed9db7eaee6c37dcec08524cb43903c60b285b172b6c094711b3961", size = 256322, upload-time = "2026-01-26T02:44:03.56Z" }, + { url = "https://files.pythonhosted.org/packages/31/6e/d8a26d81ac166a5592782d208dd90dfdc0a7a218adaa52b45a672b46c122/multidict-6.7.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3758692429e4e32f1ba0df23219cd0b4fc0a52f476726fff9337d1a57676a582", size = 253955, upload-time = "2026-01-26T02:44:04.845Z" }, + { url = "https://files.pythonhosted.org/packages/59/4c/7c672c8aad41534ba619bcd4ade7a0dc87ed6b8b5c06149b85d3dd03f0cd/multidict-6.7.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:398c1478926eca669f2fd6a5856b6de9c0acf23a2cb59a14c0ba5844fa38077e", size = 251254, upload-time = "2026-01-26T02:44:06.133Z" }, + { url = "https://files.pythonhosted.org/packages/7b/bd/84c24de512cbafbdbc39439f74e967f19570ce7924e3007174a29c348916/multidict-6.7.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c102791b1c4f3ab36ce4101154549105a53dc828f016356b3e3bcae2e3a039d3", size = 252059, upload-time = "2026-01-26T02:44:07.518Z" }, + { url = "https://files.pythonhosted.org/packages/fa/ba/f5449385510825b73d01c2d4087bf6d2fccc20a2d42ac34df93191d3dd03/multidict-6.7.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a088b62bd733e2ad12c50dad01b7d0166c30287c166e137433d3b410add807a6", size = 263588, upload-time = "2026-01-26T02:44:09.382Z" }, + { url = "https://files.pythonhosted.org/packages/d7/11/afc7c677f68f75c84a69fe37184f0f82fce13ce4b92f49f3db280b7e92b3/multidict-6.7.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3d51ff4785d58d3f6c91bdbffcb5e1f7ddfda557727043aa20d20ec4f65e324a", size = 259642, upload-time = "2026-01-26T02:44:10.73Z" }, + { url = "https://files.pythonhosted.org/packages/2b/17/ebb9644da78c4ab36403739e0e6e0e30ebb135b9caf3440825001a0bddcb/multidict-6.7.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fc5907494fccf3e7d3f94f95c91d6336b092b5fc83811720fae5e2765890dfba", size = 251377, upload-time = 
"2026-01-26T02:44:12.042Z" }, + { url = "https://files.pythonhosted.org/packages/ca/a4/840f5b97339e27846c46307f2530a2805d9d537d8b8bd416af031cad7fa0/multidict-6.7.1-cp312-cp312-win32.whl", hash = "sha256:28ca5ce2fd9716631133d0e9a9b9a745ad7f60bac2bccafb56aa380fc0b6c511", size = 41887, upload-time = "2026-01-26T02:44:14.245Z" }, + { url = "https://files.pythonhosted.org/packages/80/31/0b2517913687895f5904325c2069d6a3b78f66cc641a86a2baf75a05dcbb/multidict-6.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcee94dfbd638784645b066074b338bc9cc155d4b4bffa4adce1615c5a426c19", size = 46053, upload-time = "2026-01-26T02:44:15.371Z" }, + { url = "https://files.pythonhosted.org/packages/0c/5b/aba28e4ee4006ae4c7df8d327d31025d760ffa992ea23812a601d226e682/multidict-6.7.1-cp312-cp312-win_arm64.whl", hash = "sha256:ba0a9fb644d0c1a2194cf7ffb043bd852cea63a57f66fbd33959f7dae18517bf", size = 43307, upload-time = "2026-01-26T02:44:16.852Z" }, + { url = "https://files.pythonhosted.org/packages/f2/22/929c141d6c0dba87d3e1d38fbdf1ba8baba86b7776469f2bc2d3227a1e67/multidict-6.7.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2b41f5fed0ed563624f1c17630cb9941cf2309d4df00e494b551b5f3e3d67a23", size = 76174, upload-time = "2026-01-26T02:44:18.509Z" }, + { url = "https://files.pythonhosted.org/packages/c7/75/bc704ae15fee974f8fccd871305e254754167dce5f9e42d88a2def741a1d/multidict-6.7.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84e61e3af5463c19b67ced91f6c634effb89ef8bfc5ca0267f954451ed4bb6a2", size = 45116, upload-time = "2026-01-26T02:44:19.745Z" }, + { url = "https://files.pythonhosted.org/packages/79/76/55cd7186f498ed080a18440c9013011eb548f77ae1b297206d030eb1180a/multidict-6.7.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:935434b9853c7c112eee7ac891bc4cb86455aa631269ae35442cb316790c1445", size = 43524, upload-time = "2026-01-26T02:44:21.571Z" }, + { url = 
"https://files.pythonhosted.org/packages/e9/3c/414842ef8d5a1628d68edee29ba0e5bcf235dbfb3ccd3ea303a7fe8c72ff/multidict-6.7.1-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:432feb25a1cb67fe82a9680b4d65fb542e4635cb3166cd9c01560651ad60f177", size = 249368, upload-time = "2026-01-26T02:44:22.803Z" }, + { url = "https://files.pythonhosted.org/packages/f6/32/befed7f74c458b4a525e60519fe8d87eef72bb1e99924fa2b0f9d97a221e/multidict-6.7.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e82d14e3c948952a1a85503817e038cba5905a3352de76b9a465075d072fba23", size = 256952, upload-time = "2026-01-26T02:44:24.306Z" }, + { url = "https://files.pythonhosted.org/packages/03/d6/c878a44ba877f366630c860fdf74bfb203c33778f12b6ac274936853c451/multidict-6.7.1-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:4cfb48c6ea66c83bcaaf7e4dfa7ec1b6bbcf751b7db85a328902796dfde4c060", size = 240317, upload-time = "2026-01-26T02:44:25.772Z" }, + { url = "https://files.pythonhosted.org/packages/68/49/57421b4d7ad2e9e60e25922b08ceb37e077b90444bde6ead629095327a6f/multidict-6.7.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1d540e51b7e8e170174555edecddbd5538105443754539193e3e1061864d444d", size = 267132, upload-time = "2026-01-26T02:44:27.648Z" }, + { url = "https://files.pythonhosted.org/packages/b7/fe/ec0edd52ddbcea2a2e89e174f0206444a61440b40f39704e64dc807a70bd/multidict-6.7.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:273d23f4b40f3dce4d6c8a821c741a86dec62cded82e1175ba3d99be128147ed", size = 268140, upload-time = "2026-01-26T02:44:29.588Z" }, + { url = "https://files.pythonhosted.org/packages/b0/73/6e1b01cbeb458807aa0831742232dbdd1fa92bfa33f52a3f176b4ff3dc11/multidict-6.7.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:9d624335fd4fa1c08a53f8b4be7676ebde19cd092b3895c421045ca87895b429", size = 254277, upload-time = "2026-01-26T02:44:30.902Z" }, + { url = "https://files.pythonhosted.org/packages/6a/b2/5fb8c124d7561a4974c342bc8c778b471ebbeb3cc17df696f034a7e9afe7/multidict-6.7.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:12fad252f8b267cc75b66e8fc51b3079604e8d43a75428ffe193cd9e2195dfd6", size = 252291, upload-time = "2026-01-26T02:44:32.31Z" }, + { url = "https://files.pythonhosted.org/packages/5a/96/51d4e4e06bcce92577fcd488e22600bd38e4fd59c20cb49434d054903bd2/multidict-6.7.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:03ede2a6ffbe8ef936b92cb4529f27f42be7f56afcdab5ab739cd5f27fb1cbf9", size = 250156, upload-time = "2026-01-26T02:44:33.734Z" }, + { url = "https://files.pythonhosted.org/packages/db/6b/420e173eec5fba721a50e2a9f89eda89d9c98fded1124f8d5c675f7a0c0f/multidict-6.7.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:90efbcf47dbe33dcf643a1e400d67d59abeac5db07dc3f27d6bdeae497a2198c", size = 249742, upload-time = "2026-01-26T02:44:35.222Z" }, + { url = "https://files.pythonhosted.org/packages/44/a3/ec5b5bd98f306bc2aa297b8c6f11a46714a56b1e6ef5ebda50a4f5d7c5fb/multidict-6.7.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:5c4b9bfc148f5a91be9244d6264c53035c8a0dcd2f51f1c3c6e30e30ebaa1c84", size = 262221, upload-time = "2026-01-26T02:44:36.604Z" }, + { url = "https://files.pythonhosted.org/packages/cd/f7/e8c0d0da0cd1e28d10e624604e1a36bcc3353aaebdfdc3a43c72bc683a12/multidict-6.7.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:401c5a650f3add2472d1d288c26deebc540f99e2fb83e9525007a74cd2116f1d", size = 258664, upload-time = "2026-01-26T02:44:38.008Z" }, + { url = "https://files.pythonhosted.org/packages/52/da/151a44e8016dd33feed44f730bd856a66257c1ee7aed4f44b649fb7edeb3/multidict-6.7.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:97891f3b1b3ffbded884e2916cacf3c6fc87b66bb0dde46f7357404750559f33", size = 249490, upload-time = 
"2026-01-26T02:44:39.386Z" }, + { url = "https://files.pythonhosted.org/packages/87/af/a3b86bf9630b732897f6fc3f4c4714b90aa4361983ccbdcd6c0339b21b0c/multidict-6.7.1-cp313-cp313-win32.whl", hash = "sha256:e1c5988359516095535c4301af38d8a8838534158f649c05dd1050222321bcb3", size = 41695, upload-time = "2026-01-26T02:44:41.318Z" }, + { url = "https://files.pythonhosted.org/packages/b2/35/e994121b0e90e46134673422dd564623f93304614f5d11886b1b3e06f503/multidict-6.7.1-cp313-cp313-win_amd64.whl", hash = "sha256:960c83bf01a95b12b08fd54324a4eb1d5b52c88932b5cba5d6e712bb3ed12eb5", size = 45884, upload-time = "2026-01-26T02:44:42.488Z" }, + { url = "https://files.pythonhosted.org/packages/ca/61/42d3e5dbf661242a69c97ea363f2d7b46c567da8eadef8890022be6e2ab0/multidict-6.7.1-cp313-cp313-win_arm64.whl", hash = "sha256:563fe25c678aaba333d5399408f5ec3c383ca5b663e7f774dd179a520b8144df", size = 43122, upload-time = "2026-01-26T02:44:43.664Z" }, + { url = "https://files.pythonhosted.org/packages/6d/b3/e6b21c6c4f314bb956016b0b3ef2162590a529b84cb831c257519e7fde44/multidict-6.7.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:c76c4bec1538375dad9d452d246ca5368ad6e1c9039dadcf007ae59c70619ea1", size = 83175, upload-time = "2026-01-26T02:44:44.894Z" }, + { url = "https://files.pythonhosted.org/packages/fb/76/23ecd2abfe0957b234f6c960f4ade497f55f2c16aeb684d4ecdbf1c95791/multidict-6.7.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:57b46b24b5d5ebcc978da4ec23a819a9402b4228b8a90d9c656422b4bdd8a963", size = 48460, upload-time = "2026-01-26T02:44:46.106Z" }, + { url = "https://files.pythonhosted.org/packages/c4/57/a0ed92b23f3a042c36bc4227b72b97eca803f5f1801c1ab77c8a212d455e/multidict-6.7.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e954b24433c768ce78ab7929e84ccf3422e46deb45a4dc9f93438f8217fa2d34", size = 46930, upload-time = "2026-01-26T02:44:47.278Z" }, + { url = 
"https://files.pythonhosted.org/packages/b5/66/02ec7ace29162e447f6382c495dc95826bf931d3818799bbef11e8f7df1a/multidict-6.7.1-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3bd231490fa7217cc832528e1cd8752a96f0125ddd2b5749390f7c3ec8721b65", size = 242582, upload-time = "2026-01-26T02:44:48.604Z" }, + { url = "https://files.pythonhosted.org/packages/58/18/64f5a795e7677670e872673aca234162514696274597b3708b2c0d276cce/multidict-6.7.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:253282d70d67885a15c8a7716f3a73edf2d635793ceda8173b9ecc21f2fb8292", size = 250031, upload-time = "2026-01-26T02:44:50.544Z" }, + { url = "https://files.pythonhosted.org/packages/c8/ed/e192291dbbe51a8290c5686f482084d31bcd9d09af24f63358c3d42fd284/multidict-6.7.1-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0b4c48648d7649c9335cf1927a8b87fa692de3dcb15faa676c6a6f1f1aabda43", size = 228596, upload-time = "2026-01-26T02:44:51.951Z" }, + { url = "https://files.pythonhosted.org/packages/1e/7e/3562a15a60cf747397e7f2180b0a11dc0c38d9175a650e75fa1b4d325e15/multidict-6.7.1-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:98bc624954ec4d2c7cb074b8eefc2b5d0ce7d482e410df446414355d158fe4ca", size = 257492, upload-time = "2026-01-26T02:44:53.902Z" }, + { url = "https://files.pythonhosted.org/packages/24/02/7d0f9eae92b5249bb50ac1595b295f10e263dd0078ebb55115c31e0eaccd/multidict-6.7.1-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1b99af4d9eec0b49927b4402bcbb58dea89d3e0db8806a4086117019939ad3dd", size = 255899, upload-time = "2026-01-26T02:44:55.316Z" }, + { url = "https://files.pythonhosted.org/packages/00/e3/9b60ed9e23e64c73a5cde95269ef1330678e9c6e34dd4eb6b431b85b5a10/multidict-6.7.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", 
hash = "sha256:6aac4f16b472d5b7dc6f66a0d49dd57b0e0902090be16594dc9ebfd3d17c47e7", size = 247970, upload-time = "2026-01-26T02:44:56.783Z" }, + { url = "https://files.pythonhosted.org/packages/3e/06/538e58a63ed5cfb0bd4517e346b91da32fde409d839720f664e9a4ae4f9d/multidict-6.7.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:21f830fe223215dffd51f538e78c172ed7c7f60c9b96a2bf05c4848ad49921c3", size = 245060, upload-time = "2026-01-26T02:44:58.195Z" }, + { url = "https://files.pythonhosted.org/packages/b2/2f/d743a3045a97c895d401e9bd29aaa09b94f5cbdf1bd561609e5a6c431c70/multidict-6.7.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:f5dd81c45b05518b9aa4da4aa74e1c93d715efa234fd3e8a179df611cc85e5f4", size = 235888, upload-time = "2026-01-26T02:44:59.57Z" }, + { url = "https://files.pythonhosted.org/packages/38/83/5a325cac191ab28b63c52f14f1131f3b0a55ba3b9aa65a6d0bf2a9b921a0/multidict-6.7.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:eb304767bca2bb92fb9c5bd33cedc95baee5bb5f6c88e63706533a1c06ad08c8", size = 243554, upload-time = "2026-01-26T02:45:01.054Z" }, + { url = "https://files.pythonhosted.org/packages/20/1f/9d2327086bd15da2725ef6aae624208e2ef828ed99892b17f60c344e57ed/multidict-6.7.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:c9035dde0f916702850ef66460bc4239d89d08df4d02023a5926e7446724212c", size = 252341, upload-time = "2026-01-26T02:45:02.484Z" }, + { url = "https://files.pythonhosted.org/packages/e8/2c/2a1aa0280cf579d0f6eed8ee5211c4f1730bd7e06c636ba2ee6aafda302e/multidict-6.7.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:af959b9beeb66c822380f222f0e0a1889331597e81f1ded7f374f3ecb0fd6c52", size = 246391, upload-time = "2026-01-26T02:45:03.862Z" }, + { url = "https://files.pythonhosted.org/packages/e5/03/7ca022ffc36c5a3f6e03b179a5ceb829be9da5783e6fe395f347c0794680/multidict-6.7.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:41f2952231456154ee479651491e94118229844dd7226541788be783be2b5108", size = 243422, upload-time = 
"2026-01-26T02:45:05.296Z" }, + { url = "https://files.pythonhosted.org/packages/dc/1d/b31650eab6c5778aceed46ba735bd97f7c7d2f54b319fa916c0f96e7805b/multidict-6.7.1-cp313-cp313t-win32.whl", hash = "sha256:df9f19c28adcb40b6aae30bbaa1478c389efd50c28d541d76760199fc1037c32", size = 47770, upload-time = "2026-01-26T02:45:06.754Z" }, + { url = "https://files.pythonhosted.org/packages/ac/5b/2d2d1d522e51285bd61b1e20df8f47ae1a9d80839db0b24ea783b3832832/multidict-6.7.1-cp313-cp313t-win_amd64.whl", hash = "sha256:d54ecf9f301853f2c5e802da559604b3e95bb7a3b01a9c295c6ee591b9882de8", size = 53109, upload-time = "2026-01-26T02:45:08.044Z" }, + { url = "https://files.pythonhosted.org/packages/3d/a3/cc409ba012c83ca024a308516703cf339bdc4b696195644a7215a5164a24/multidict-6.7.1-cp313-cp313t-win_arm64.whl", hash = "sha256:5a37ca18e360377cfda1d62f5f382ff41f2b8c4ccb329ed974cc2e1643440118", size = 45573, upload-time = "2026-01-26T02:45:09.349Z" }, + { url = "https://files.pythonhosted.org/packages/91/cc/db74228a8be41884a567e88a62fd589a913708fcf180d029898c17a9a371/multidict-6.7.1-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8f333ec9c5eb1b7105e3b84b53141e66ca05a19a605368c55450b6ba208cb9ee", size = 75190, upload-time = "2026-01-26T02:45:10.651Z" }, + { url = "https://files.pythonhosted.org/packages/d5/22/492f2246bb5b534abd44804292e81eeaf835388901f0c574bac4eeec73c5/multidict-6.7.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:a407f13c188f804c759fc6a9f88286a565c242a76b27626594c133b82883b5c2", size = 44486, upload-time = "2026-01-26T02:45:11.938Z" }, + { url = "https://files.pythonhosted.org/packages/f1/4f/733c48f270565d78b4544f2baddc2fb2a245e5a8640254b12c36ac7ac68e/multidict-6.7.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0e161ddf326db5577c3a4cc2d8648f81456e8a20d40415541587a71620d7a7d1", size = 43219, upload-time = "2026-01-26T02:45:14.346Z" }, + { url = 
"https://files.pythonhosted.org/packages/24/bb/2c0c2287963f4259c85e8bcbba9182ced8d7fca65c780c38e99e61629d11/multidict-6.7.1-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1e3a8bb24342a8201d178c3b4984c26ba81a577c80d4d525727427460a50c22d", size = 245132, upload-time = "2026-01-26T02:45:15.712Z" }, + { url = "https://files.pythonhosted.org/packages/a7/f9/44d4b3064c65079d2467888794dea218d1601898ac50222ab8a9a8094460/multidict-6.7.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97231140a50f5d447d3164f994b86a0bed7cd016e2682f8650d6a9158e14fd31", size = 252420, upload-time = "2026-01-26T02:45:17.293Z" }, + { url = "https://files.pythonhosted.org/packages/8b/13/78f7275e73fa17b24c9a51b0bd9d73ba64bb32d0ed51b02a746eb876abe7/multidict-6.7.1-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6b10359683bd8806a200fd2909e7c8ca3a7b24ec1d8132e483d58e791d881048", size = 233510, upload-time = "2026-01-26T02:45:19.356Z" }, + { url = "https://files.pythonhosted.org/packages/4b/25/8167187f62ae3cbd52da7893f58cb036b47ea3fb67138787c76800158982/multidict-6.7.1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:283ddac99f7ac25a4acadbf004cb5ae34480bbeb063520f70ce397b281859362", size = 264094, upload-time = "2026-01-26T02:45:20.834Z" }, + { url = "https://files.pythonhosted.org/packages/a1/e7/69a3a83b7b030cf283fb06ce074a05a02322359783424d7edf0f15fe5022/multidict-6.7.1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:538cec1e18c067d0e6103aa9a74f9e832904c957adc260e61cd9d8cf0c3b3d37", size = 260786, upload-time = "2026-01-26T02:45:22.818Z" }, + { url = "https://files.pythonhosted.org/packages/fe/3b/8ec5074bcfc450fe84273713b4b0a0dd47c0249358f5d82eb8104ffe2520/multidict-6.7.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:7eee46ccb30ff48a1e35bb818cc90846c6be2b68240e42a78599166722cea709", size = 248483, upload-time = "2026-01-26T02:45:24.368Z" }, + { url = "https://files.pythonhosted.org/packages/48/5a/d5a99e3acbca0e29c5d9cba8f92ceb15dce78bab963b308ae692981e3a5d/multidict-6.7.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fa263a02f4f2dd2d11a7b1bb4362aa7cb1049f84a9235d31adf63f30143469a0", size = 248403, upload-time = "2026-01-26T02:45:25.982Z" }, + { url = "https://files.pythonhosted.org/packages/35/48/e58cd31f6c7d5102f2a4bf89f96b9cf7e00b6c6f3d04ecc44417c00a5a3c/multidict-6.7.1-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:2e1425e2f99ec5bd36c15a01b690a1a2456209c5deed58f95469ffb46039ccbb", size = 240315, upload-time = "2026-01-26T02:45:27.487Z" }, + { url = "https://files.pythonhosted.org/packages/94/33/1cd210229559cb90b6786c30676bb0c58249ff42f942765f88793b41fdce/multidict-6.7.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:497394b3239fc6f0e13a78a3e1b61296e72bf1c5f94b4c4eb80b265c37a131cd", size = 245528, upload-time = "2026-01-26T02:45:28.991Z" }, + { url = "https://files.pythonhosted.org/packages/64/f2/6e1107d226278c876c783056b7db43d800bb64c6131cec9c8dfb6903698e/multidict-6.7.1-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:233b398c29d3f1b9676b4b6f75c518a06fcb2ea0b925119fb2c1bc35c05e1601", size = 258784, upload-time = "2026-01-26T02:45:30.503Z" }, + { url = "https://files.pythonhosted.org/packages/4d/c1/11f664f14d525e4a1b5327a82d4de61a1db604ab34c6603bb3c2cc63ad34/multidict-6.7.1-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:93b1818e4a6e0930454f0f2af7dfce69307ca03cdcfb3739bf4d91241967b6c1", size = 251980, upload-time = "2026-01-26T02:45:32.603Z" }, + { url = "https://files.pythonhosted.org/packages/e1/9f/75a9ac888121d0c5bbd4ecf4eead45668b1766f6baabfb3b7f66a410e231/multidict-6.7.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:f33dc2a3abe9249ea5d8360f969ec7f4142e7ac45ee7014d8f8d5acddf178b7b", size = 243602, upload-time = 
"2026-01-26T02:45:34.043Z" }, + { url = "https://files.pythonhosted.org/packages/9a/e7/50bf7b004cc8525d80dbbbedfdc7aed3e4c323810890be4413e589074032/multidict-6.7.1-cp314-cp314-win32.whl", hash = "sha256:3ab8b9d8b75aef9df299595d5388b14530839f6422333357af1339443cff777d", size = 40930, upload-time = "2026-01-26T02:45:36.278Z" }, + { url = "https://files.pythonhosted.org/packages/e0/bf/52f25716bbe93745595800f36fb17b73711f14da59ed0bb2eba141bc9f0f/multidict-6.7.1-cp314-cp314-win_amd64.whl", hash = "sha256:5e01429a929600e7dab7b166062d9bb54a5eed752384c7384c968c2afab8f50f", size = 45074, upload-time = "2026-01-26T02:45:37.546Z" }, + { url = "https://files.pythonhosted.org/packages/97/ab/22803b03285fa3a525f48217963da3a65ae40f6a1b6f6cf2768879e208f9/multidict-6.7.1-cp314-cp314-win_arm64.whl", hash = "sha256:4885cb0e817aef5d00a2e8451d4665c1808378dc27c2705f1bf4ef8505c0d2e5", size = 42471, upload-time = "2026-01-26T02:45:38.889Z" }, + { url = "https://files.pythonhosted.org/packages/e0/6d/f9293baa6146ba9507e360ea0292b6422b016907c393e2f63fc40ab7b7b5/multidict-6.7.1-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:0458c978acd8e6ea53c81eefaddbbee9c6c5e591f41b3f5e8e194780fe026581", size = 82401, upload-time = "2026-01-26T02:45:40.254Z" }, + { url = "https://files.pythonhosted.org/packages/7a/68/53b5494738d83558d87c3c71a486504d8373421c3e0dbb6d0db48ad42ee0/multidict-6.7.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:c0abd12629b0af3cf590982c0b413b1e7395cd4ec026f30986818ab95bfaa94a", size = 48143, upload-time = "2026-01-26T02:45:41.635Z" }, + { url = "https://files.pythonhosted.org/packages/37/e8/5284c53310dcdc99ce5d66563f6e5773531a9b9fe9ec7a615e9bc306b05f/multidict-6.7.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:14525a5f61d7d0c94b368a42cff4c9a4e7ba2d52e2672a7b23d84dc86fb02b0c", size = 46507, upload-time = "2026-01-26T02:45:42.99Z" }, + { url = 
"https://files.pythonhosted.org/packages/e4/fc/6800d0e5b3875568b4083ecf5f310dcf91d86d52573160834fb4bfcf5e4f/multidict-6.7.1-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:17307b22c217b4cf05033dabefe68255a534d637c6c9b0cc8382718f87be4262", size = 239358, upload-time = "2026-01-26T02:45:44.376Z" }, + { url = "https://files.pythonhosted.org/packages/41/75/4ad0973179361cdf3a113905e6e088173198349131be2b390f9fa4da5fc6/multidict-6.7.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7a7e590ff876a3eaf1c02a4dfe0724b6e69a9e9de6d8f556816f29c496046e59", size = 246884, upload-time = "2026-01-26T02:45:47.167Z" }, + { url = "https://files.pythonhosted.org/packages/c3/9c/095bb28b5da139bd41fb9a5d5caff412584f377914bd8787c2aa98717130/multidict-6.7.1-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:5fa6a95dfee63893d80a34758cd0e0c118a30b8dcb46372bf75106c591b77889", size = 225878, upload-time = "2026-01-26T02:45:48.698Z" }, + { url = "https://files.pythonhosted.org/packages/07/d0/c0a72000243756e8f5a277b6b514fa005f2c73d481b7d9e47cd4568aa2e4/multidict-6.7.1-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a0543217a6a017692aa6ae5cc39adb75e587af0f3a82288b1492eb73dd6cc2a4", size = 253542, upload-time = "2026-01-26T02:45:50.164Z" }, + { url = "https://files.pythonhosted.org/packages/c0/6b/f69da15289e384ecf2a68837ec8b5ad8c33e973aa18b266f50fe55f24b8c/multidict-6.7.1-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f99fe611c312b3c1c0ace793f92464d8cd263cc3b26b5721950d977b006b6c4d", size = 252403, upload-time = "2026-01-26T02:45:51.779Z" }, + { url = "https://files.pythonhosted.org/packages/a2/76/b9669547afa5a1a25cd93eaca91c0da1c095b06b6d2d8ec25b713588d3a1/multidict-6.7.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", 
hash = "sha256:9004d8386d133b7e6135679424c91b0b854d2d164af6ea3f289f8f2761064609", size = 244889, upload-time = "2026-01-26T02:45:53.27Z" }, + { url = "https://files.pythonhosted.org/packages/7e/a9/a50d2669e506dad33cfc45b5d574a205587b7b8a5f426f2fbb2e90882588/multidict-6.7.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e628ef0e6859ffd8273c69412a2465c4be4a9517d07261b33334b5ec6f3c7489", size = 241982, upload-time = "2026-01-26T02:45:54.919Z" }, + { url = "https://files.pythonhosted.org/packages/c5/bb/1609558ad8b456b4827d3c5a5b775c93b87878fd3117ed3db3423dfbce1b/multidict-6.7.1-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:841189848ba629c3552035a6a7f5bf3b02eb304e9fea7492ca220a8eda6b0e5c", size = 232415, upload-time = "2026-01-26T02:45:56.981Z" }, + { url = "https://files.pythonhosted.org/packages/d8/59/6f61039d2aa9261871e03ab9dc058a550d240f25859b05b67fd70f80d4b3/multidict-6.7.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:ce1bbd7d780bb5a0da032e095c951f7014d6b0a205f8318308140f1a6aba159e", size = 240337, upload-time = "2026-01-26T02:45:58.698Z" }, + { url = "https://files.pythonhosted.org/packages/a1/29/fdc6a43c203890dc2ae9249971ecd0c41deaedfe00d25cb6564b2edd99eb/multidict-6.7.1-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:b26684587228afed0d50cf804cc71062cc9c1cdf55051c4c6345d372947b268c", size = 248788, upload-time = "2026-01-26T02:46:00.862Z" }, + { url = "https://files.pythonhosted.org/packages/a9/14/a153a06101323e4cf086ecee3faadba52ff71633d471f9685c42e3736163/multidict-6.7.1-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:9f9af11306994335398293f9958071019e3ab95e9a707dc1383a35613f6abcb9", size = 242842, upload-time = "2026-01-26T02:46:02.824Z" }, + { url = "https://files.pythonhosted.org/packages/41/5f/604ae839e64a4a6efc80db94465348d3b328ee955e37acb24badbcd24d83/multidict-6.7.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b4938326284c4f1224178a560987b6cf8b4d38458b113d9b8c1db1a836e640a2", size = 240237, upload-time = 
"2026-01-26T02:46:05.898Z" }, + { url = "https://files.pythonhosted.org/packages/5f/60/c3a5187bf66f6fb546ff4ab8fb5a077cbdd832d7b1908d4365c7f74a1917/multidict-6.7.1-cp314-cp314t-win32.whl", hash = "sha256:98655c737850c064a65e006a3df7c997cd3b220be4ec8fe26215760b9697d4d7", size = 48008, upload-time = "2026-01-26T02:46:07.468Z" }, + { url = "https://files.pythonhosted.org/packages/0c/f7/addf1087b860ac60e6f382240f64fb99f8bfb532bb06f7c542b83c29ca61/multidict-6.7.1-cp314-cp314t-win_amd64.whl", hash = "sha256:497bde6223c212ba11d462853cfa4f0ae6ef97465033e7dc9940cdb3ab5b48e5", size = 53542, upload-time = "2026-01-26T02:46:08.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/81/4629d0aa32302ef7b2ec65c75a728cc5ff4fa410c50096174c1632e70b3e/multidict-6.7.1-cp314-cp314t-win_arm64.whl", hash = "sha256:2bbd113e0d4af5db41d5ebfe9ccaff89de2120578164f86a5d17d5a576d1e5b2", size = 44719, upload-time = "2026-01-26T02:46:11.146Z" }, + { url = "https://files.pythonhosted.org/packages/81/08/7036c080d7117f28a4af526d794aab6a84463126db031b007717c1a6676e/multidict-6.7.1-py3-none-any.whl", hash = "sha256:55d97cc6dae627efa6a6e548885712d4864b81110ac76fa4e534c03819fa4a56", size = 12319, upload-time = "2026-01-26T02:46:44.004Z" }, ] [[package]] name = "numpy" -version = "2.3.5" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/76/65/21b3bc86aac7b8f2862db1e808f1ea22b028e30a225a34a5ede9bf8678f2/numpy-2.3.5.tar.gz", hash = "sha256:784db1dcdab56bf0517743e746dfb0f885fc68d948aba86eeec2cba234bdf1c0", size = 20584950, upload-time = "2025-11-16T22:52:42.067Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/43/77/84dd1d2e34d7e2792a236ba180b5e8fcc1e3e414e761ce0253f63d7f572e/numpy-2.3.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:de5672f4a7b200c15a4127042170a694d4df43c992948f5e1af57f0174beed10", size = 17034641, upload-time = "2025-11-16T22:49:19.336Z" }, - { url = 
"https://files.pythonhosted.org/packages/2a/ea/25e26fa5837106cde46ae7d0b667e20f69cbbc0efd64cba8221411ab26ae/numpy-2.3.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:acfd89508504a19ed06ef963ad544ec6664518c863436306153e13e94605c218", size = 12528324, upload-time = "2025-11-16T22:49:22.582Z" }, - { url = "https://files.pythonhosted.org/packages/4d/1a/e85f0eea4cf03d6a0228f5c0256b53f2df4bc794706e7df019fc622e47f1/numpy-2.3.5-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:ffe22d2b05504f786c867c8395de703937f934272eb67586817b46188b4ded6d", size = 5356872, upload-time = "2025-11-16T22:49:25.408Z" }, - { url = "https://files.pythonhosted.org/packages/5c/bb/35ef04afd567f4c989c2060cde39211e4ac5357155c1833bcd1166055c61/numpy-2.3.5-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:872a5cf366aec6bb1147336480fef14c9164b154aeb6542327de4970282cd2f5", size = 6893148, upload-time = "2025-11-16T22:49:27.549Z" }, - { url = "https://files.pythonhosted.org/packages/f2/2b/05bbeb06e2dff5eab512dfc678b1cc5ee94d8ac5956a0885c64b6b26252b/numpy-2.3.5-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3095bdb8dd297e5920b010e96134ed91d852d81d490e787beca7e35ae1d89cf7", size = 14557282, upload-time = "2025-11-16T22:49:30.964Z" }, - { url = "https://files.pythonhosted.org/packages/65/fb/2b23769462b34398d9326081fad5655198fcf18966fcb1f1e49db44fbf31/numpy-2.3.5-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8cba086a43d54ca804ce711b2a940b16e452807acebe7852ff327f1ecd49b0d4", size = 16897903, upload-time = "2025-11-16T22:49:34.191Z" }, - { url = "https://files.pythonhosted.org/packages/ac/14/085f4cf05fc3f1e8aa95e85404e984ffca9b2275a5dc2b1aae18a67538b8/numpy-2.3.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6cf9b429b21df6b99f4dee7a1218b8b7ffbbe7df8764dc0bd60ce8a0708fed1e", size = 16341672, upload-time = "2025-11-16T22:49:37.2Z" }, - { url = 
"https://files.pythonhosted.org/packages/6f/3b/1f73994904142b2aa290449b3bb99772477b5fd94d787093e4f24f5af763/numpy-2.3.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:396084a36abdb603546b119d96528c2f6263921c50df3c8fd7cb28873a237748", size = 18838896, upload-time = "2025-11-16T22:49:39.727Z" }, - { url = "https://files.pythonhosted.org/packages/cd/b9/cf6649b2124f288309ffc353070792caf42ad69047dcc60da85ee85fea58/numpy-2.3.5-cp311-cp311-win32.whl", hash = "sha256:b0c7088a73aef3d687c4deef8452a3ac7c1be4e29ed8bf3b366c8111128ac60c", size = 6563608, upload-time = "2025-11-16T22:49:42.079Z" }, - { url = "https://files.pythonhosted.org/packages/aa/44/9fe81ae1dcc29c531843852e2874080dc441338574ccc4306b39e2ff6e59/numpy-2.3.5-cp311-cp311-win_amd64.whl", hash = "sha256:a414504bef8945eae5f2d7cb7be2d4af77c5d1cb5e20b296c2c25b61dff2900c", size = 13078442, upload-time = "2025-11-16T22:49:43.99Z" }, - { url = "https://files.pythonhosted.org/packages/6d/a7/f99a41553d2da82a20a2f22e93c94f928e4490bb447c9ff3c4ff230581d3/numpy-2.3.5-cp311-cp311-win_arm64.whl", hash = "sha256:0cd00b7b36e35398fa2d16af7b907b65304ef8bb4817a550e06e5012929830fa", size = 10458555, upload-time = "2025-11-16T22:49:47.092Z" }, - { url = "https://files.pythonhosted.org/packages/44/37/e669fe6cbb2b96c62f6bbedc6a81c0f3b7362f6a59230b23caa673a85721/numpy-2.3.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:74ae7b798248fe62021dbf3c914245ad45d1a6b0cb4a29ecb4b31d0bfbc4cc3e", size = 16733873, upload-time = "2025-11-16T22:49:49.84Z" }, - { url = "https://files.pythonhosted.org/packages/c5/65/df0db6c097892c9380851ab9e44b52d4f7ba576b833996e0080181c0c439/numpy-2.3.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ee3888d9ff7c14604052b2ca5535a30216aa0a58e948cdd3eeb8d3415f638769", size = 12259838, upload-time = "2025-11-16T22:49:52.863Z" }, - { url = "https://files.pythonhosted.org/packages/5b/e1/1ee06e70eb2136797abe847d386e7c0e830b67ad1d43f364dd04fa50d338/numpy-2.3.5-cp312-cp312-macosx_14_0_arm64.whl", hash = 
"sha256:612a95a17655e213502f60cfb9bf9408efdc9eb1d5f50535cc6eb365d11b42b5", size = 5088378, upload-time = "2025-11-16T22:49:55.055Z" }, - { url = "https://files.pythonhosted.org/packages/6d/9c/1ca85fb86708724275103b81ec4cf1ac1d08f465368acfc8da7ab545bdae/numpy-2.3.5-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3101e5177d114a593d79dd79658650fe28b5a0d8abeb8ce6f437c0e6df5be1a4", size = 6628559, upload-time = "2025-11-16T22:49:57.371Z" }, - { url = "https://files.pythonhosted.org/packages/74/78/fcd41e5a0ce4f3f7b003da85825acddae6d7ecb60cf25194741b036ca7d6/numpy-2.3.5-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b973c57ff8e184109db042c842423ff4f60446239bd585a5131cc47f06f789d", size = 14250702, upload-time = "2025-11-16T22:49:59.632Z" }, - { url = "https://files.pythonhosted.org/packages/b6/23/2a1b231b8ff672b4c450dac27164a8b2ca7d9b7144f9c02d2396518352eb/numpy-2.3.5-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0d8163f43acde9a73c2a33605353a4f1bc4798745a8b1d73183b28e5b435ae28", size = 16606086, upload-time = "2025-11-16T22:50:02.127Z" }, - { url = "https://files.pythonhosted.org/packages/a0/c5/5ad26fbfbe2012e190cc7d5003e4d874b88bb18861d0829edc140a713021/numpy-2.3.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:51c1e14eb1e154ebd80e860722f9e6ed6ec89714ad2db2d3aa33c31d7c12179b", size = 16025985, upload-time = "2025-11-16T22:50:04.536Z" }, - { url = "https://files.pythonhosted.org/packages/d2/fa/dd48e225c46c819288148d9d060b047fd2a6fb1eb37eae25112ee4cb4453/numpy-2.3.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b46b4ec24f7293f23adcd2d146960559aaf8020213de8ad1909dba6c013bf89c", size = 18542976, upload-time = "2025-11-16T22:50:07.557Z" }, - { url = "https://files.pythonhosted.org/packages/05/79/ccbd23a75862d95af03d28b5c6901a1b7da4803181513d52f3b86ed9446e/numpy-2.3.5-cp312-cp312-win32.whl", hash = "sha256:3997b5b3c9a771e157f9aae01dd579ee35ad7109be18db0e85dbdbe1de06e952", size = 6285274, 
upload-time = "2025-11-16T22:50:10.746Z" }, - { url = "https://files.pythonhosted.org/packages/2d/57/8aeaf160312f7f489dea47ab61e430b5cb051f59a98ae68b7133ce8fa06a/numpy-2.3.5-cp312-cp312-win_amd64.whl", hash = "sha256:86945f2ee6d10cdfd67bcb4069c1662dd711f7e2a4343db5cecec06b87cf31aa", size = 12782922, upload-time = "2025-11-16T22:50:12.811Z" }, - { url = "https://files.pythonhosted.org/packages/78/a6/aae5cc2ca78c45e64b9ef22f089141d661516856cf7c8a54ba434576900d/numpy-2.3.5-cp312-cp312-win_arm64.whl", hash = "sha256:f28620fe26bee16243be2b7b874da327312240a7cdc38b769a697578d2100013", size = 10194667, upload-time = "2025-11-16T22:50:16.16Z" }, - { url = "https://files.pythonhosted.org/packages/db/69/9cde09f36da4b5a505341180a3f2e6fadc352fd4d2b7096ce9778db83f1a/numpy-2.3.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d0f23b44f57077c1ede8c5f26b30f706498b4862d3ff0a7298b8411dd2f043ff", size = 16728251, upload-time = "2025-11-16T22:50:19.013Z" }, - { url = "https://files.pythonhosted.org/packages/79/fb/f505c95ceddd7027347b067689db71ca80bd5ecc926f913f1a23e65cf09b/numpy-2.3.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa5bc7c5d59d831d9773d1170acac7893ce3a5e130540605770ade83280e7188", size = 12254652, upload-time = "2025-11-16T22:50:21.487Z" }, - { url = "https://files.pythonhosted.org/packages/78/da/8c7738060ca9c31b30e9301ee0cf6c5ffdbf889d9593285a1cead337f9a5/numpy-2.3.5-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:ccc933afd4d20aad3c00bcef049cb40049f7f196e0397f1109dba6fed63267b0", size = 5083172, upload-time = "2025-11-16T22:50:24.562Z" }, - { url = "https://files.pythonhosted.org/packages/a4/b4/ee5bb2537fb9430fd2ef30a616c3672b991a4129bb1c7dcc42aa0abbe5d7/numpy-2.3.5-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:afaffc4393205524af9dfa400fa250143a6c3bc646c08c9f5e25a9f4b4d6a903", size = 6622990, upload-time = "2025-11-16T22:50:26.47Z" }, - { url = 
"https://files.pythonhosted.org/packages/95/03/dc0723a013c7d7c19de5ef29e932c3081df1c14ba582b8b86b5de9db7f0f/numpy-2.3.5-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c75442b2209b8470d6d5d8b1c25714270686f14c749028d2199c54e29f20b4d", size = 14248902, upload-time = "2025-11-16T22:50:28.861Z" }, - { url = "https://files.pythonhosted.org/packages/f5/10/ca162f45a102738958dcec8023062dad0cbc17d1ab99d68c4e4a6c45fb2b/numpy-2.3.5-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11e06aa0af8c0f05104d56450d6093ee639e15f24ecf62d417329d06e522e017", size = 16597430, upload-time = "2025-11-16T22:50:31.56Z" }, - { url = "https://files.pythonhosted.org/packages/2a/51/c1e29be863588db58175175f057286900b4b3327a1351e706d5e0f8dd679/numpy-2.3.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ed89927b86296067b4f81f108a2271d8926467a8868e554eaf370fc27fa3ccaf", size = 16024551, upload-time = "2025-11-16T22:50:34.242Z" }, - { url = "https://files.pythonhosted.org/packages/83/68/8236589d4dbb87253d28259d04d9b814ec0ecce7cb1c7fed29729f4c3a78/numpy-2.3.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:51c55fe3451421f3a6ef9a9c1439e82101c57a2c9eab9feb196a62b1a10b58ce", size = 18533275, upload-time = "2025-11-16T22:50:37.651Z" }, - { url = "https://files.pythonhosted.org/packages/40/56/2932d75b6f13465239e3b7b7e511be27f1b8161ca2510854f0b6e521c395/numpy-2.3.5-cp313-cp313-win32.whl", hash = "sha256:1978155dd49972084bd6ef388d66ab70f0c323ddee6f693d539376498720fb7e", size = 6277637, upload-time = "2025-11-16T22:50:40.11Z" }, - { url = "https://files.pythonhosted.org/packages/0c/88/e2eaa6cffb115b85ed7c7c87775cb8bcf0816816bc98ca8dbfa2ee33fe6e/numpy-2.3.5-cp313-cp313-win_amd64.whl", hash = "sha256:00dc4e846108a382c5869e77c6ed514394bdeb3403461d25a829711041217d5b", size = 12779090, upload-time = "2025-11-16T22:50:42.503Z" }, - { url = 
"https://files.pythonhosted.org/packages/8f/88/3f41e13a44ebd4034ee17baa384acac29ba6a4fcc2aca95f6f08ca0447d1/numpy-2.3.5-cp313-cp313-win_arm64.whl", hash = "sha256:0472f11f6ec23a74a906a00b48a4dcf3849209696dff7c189714511268d103ae", size = 10194710, upload-time = "2025-11-16T22:50:44.971Z" }, - { url = "https://files.pythonhosted.org/packages/13/cb/71744144e13389d577f867f745b7df2d8489463654a918eea2eeb166dfc9/numpy-2.3.5-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:414802f3b97f3c1eef41e530aaba3b3c1620649871d8cb38c6eaff034c2e16bd", size = 16827292, upload-time = "2025-11-16T22:50:47.715Z" }, - { url = "https://files.pythonhosted.org/packages/71/80/ba9dc6f2a4398e7f42b708a7fdc841bb638d353be255655498edbf9a15a8/numpy-2.3.5-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5ee6609ac3604fa7780e30a03e5e241a7956f8e2fcfe547d51e3afa5247ac47f", size = 12378897, upload-time = "2025-11-16T22:50:51.327Z" }, - { url = "https://files.pythonhosted.org/packages/2e/6d/db2151b9f64264bcceccd51741aa39b50150de9b602d98ecfe7e0c4bff39/numpy-2.3.5-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:86d835afea1eaa143012a2d7a3f45a3adce2d7adc8b4961f0b362214d800846a", size = 5207391, upload-time = "2025-11-16T22:50:54.542Z" }, - { url = "https://files.pythonhosted.org/packages/80/ae/429bacace5ccad48a14c4ae5332f6aa8ab9f69524193511d60ccdfdc65fa/numpy-2.3.5-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:30bc11310e8153ca664b14c5f1b73e94bd0503681fcf136a163de856f3a50139", size = 6721275, upload-time = "2025-11-16T22:50:56.794Z" }, - { url = "https://files.pythonhosted.org/packages/74/5b/1919abf32d8722646a38cd527bc3771eb229a32724ee6ba340ead9b92249/numpy-2.3.5-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1062fde1dcf469571705945b0f221b73928f34a20c904ffb45db101907c3454e", size = 14306855, upload-time = "2025-11-16T22:50:59.208Z" }, - { url = 
"https://files.pythonhosted.org/packages/a5/87/6831980559434973bebc30cd9c1f21e541a0f2b0c280d43d3afd909b66d0/numpy-2.3.5-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ce581db493ea1a96c0556360ede6607496e8bf9b3a8efa66e06477267bc831e9", size = 16657359, upload-time = "2025-11-16T22:51:01.991Z" }, - { url = "https://files.pythonhosted.org/packages/dd/91/c797f544491ee99fd00495f12ebb7802c440c1915811d72ac5b4479a3356/numpy-2.3.5-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:cc8920d2ec5fa99875b670bb86ddeb21e295cb07aa331810d9e486e0b969d946", size = 16093374, upload-time = "2025-11-16T22:51:05.291Z" }, - { url = "https://files.pythonhosted.org/packages/74/a6/54da03253afcbe7a72785ec4da9c69fb7a17710141ff9ac5fcb2e32dbe64/numpy-2.3.5-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:9ee2197ef8c4f0dfe405d835f3b6a14f5fee7782b5de51ba06fb65fc9b36e9f1", size = 18594587, upload-time = "2025-11-16T22:51:08.585Z" }, - { url = "https://files.pythonhosted.org/packages/80/e9/aff53abbdd41b0ecca94285f325aff42357c6b5abc482a3fcb4994290b18/numpy-2.3.5-cp313-cp313t-win32.whl", hash = "sha256:70b37199913c1bd300ff6e2693316c6f869c7ee16378faf10e4f5e3275b299c3", size = 6405940, upload-time = "2025-11-16T22:51:11.541Z" }, - { url = "https://files.pythonhosted.org/packages/d5/81/50613fec9d4de5480de18d4f8ef59ad7e344d497edbef3cfd80f24f98461/numpy-2.3.5-cp313-cp313t-win_amd64.whl", hash = "sha256:b501b5fa195cc9e24fe102f21ec0a44dffc231d2af79950b451e0d99cea02234", size = 12920341, upload-time = "2025-11-16T22:51:14.312Z" }, - { url = "https://files.pythonhosted.org/packages/bb/ab/08fd63b9a74303947f34f0bd7c5903b9c5532c2d287bead5bdf4c556c486/numpy-2.3.5-cp313-cp313t-win_arm64.whl", hash = "sha256:a80afd79f45f3c4a7d341f13acbe058d1ca8ac017c165d3fa0d3de6bc1a079d7", size = 10262507, upload-time = "2025-11-16T22:51:16.846Z" }, - { url = 
"https://files.pythonhosted.org/packages/ba/97/1a914559c19e32d6b2e233cf9a6a114e67c856d35b1d6babca571a3e880f/numpy-2.3.5-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:bf06bc2af43fa8d32d30fae16ad965663e966b1a3202ed407b84c989c3221e82", size = 16735706, upload-time = "2025-11-16T22:51:19.558Z" }, - { url = "https://files.pythonhosted.org/packages/57/d4/51233b1c1b13ecd796311216ae417796b88b0616cfd8a33ae4536330748a/numpy-2.3.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:052e8c42e0c49d2575621c158934920524f6c5da05a1d3b9bab5d8e259e045f0", size = 12264507, upload-time = "2025-11-16T22:51:22.492Z" }, - { url = "https://files.pythonhosted.org/packages/45/98/2fe46c5c2675b8306d0b4a3ec3494273e93e1226a490f766e84298576956/numpy-2.3.5-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:1ed1ec893cff7040a02c8aa1c8611b94d395590d553f6b53629a4461dc7f7b63", size = 5093049, upload-time = "2025-11-16T22:51:25.171Z" }, - { url = "https://files.pythonhosted.org/packages/ce/0e/0698378989bb0ac5f1660c81c78ab1fe5476c1a521ca9ee9d0710ce54099/numpy-2.3.5-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:2dcd0808a421a482a080f89859a18beb0b3d1e905b81e617a188bd80422d62e9", size = 6626603, upload-time = "2025-11-16T22:51:27Z" }, - { url = "https://files.pythonhosted.org/packages/5e/a6/9ca0eecc489640615642a6cbc0ca9e10df70df38c4d43f5a928ff18d8827/numpy-2.3.5-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:727fd05b57df37dc0bcf1a27767a3d9a78cbbc92822445f32cc3436ba797337b", size = 14262696, upload-time = "2025-11-16T22:51:29.402Z" }, - { url = "https://files.pythonhosted.org/packages/c8/f6/07ec185b90ec9d7217a00eeeed7383b73d7e709dae2a9a021b051542a708/numpy-2.3.5-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fffe29a1ef00883599d1dc2c51aa2e5d80afe49523c261a74933df395c15c520", size = 16597350, upload-time = "2025-11-16T22:51:32.167Z" }, - { url = 
"https://files.pythonhosted.org/packages/75/37/164071d1dde6a1a84c9b8e5b414fa127981bad47adf3a6b7e23917e52190/numpy-2.3.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8f7f0e05112916223d3f438f293abf0727e1181b5983f413dfa2fefc4098245c", size = 16040190, upload-time = "2025-11-16T22:51:35.403Z" }, - { url = "https://files.pythonhosted.org/packages/08/3c/f18b82a406b04859eb026d204e4e1773eb41c5be58410f41ffa511d114ae/numpy-2.3.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2e2eb32ddb9ccb817d620ac1d8dae7c3f641c1e5f55f531a33e8ab97960a75b8", size = 18536749, upload-time = "2025-11-16T22:51:39.698Z" }, - { url = "https://files.pythonhosted.org/packages/40/79/f82f572bf44cf0023a2fe8588768e23e1592585020d638999f15158609e1/numpy-2.3.5-cp314-cp314-win32.whl", hash = "sha256:66f85ce62c70b843bab1fb14a05d5737741e74e28c7b8b5a064de10142fad248", size = 6335432, upload-time = "2025-11-16T22:51:42.476Z" }, - { url = "https://files.pythonhosted.org/packages/a3/2e/235b4d96619931192c91660805e5e49242389742a7a82c27665021db690c/numpy-2.3.5-cp314-cp314-win_amd64.whl", hash = "sha256:e6a0bc88393d65807d751a614207b7129a310ca4fe76a74e5c7da5fa5671417e", size = 12919388, upload-time = "2025-11-16T22:51:45.275Z" }, - { url = "https://files.pythonhosted.org/packages/07/2b/29fd75ce45d22a39c61aad74f3d718e7ab67ccf839ca8b60866054eb15f8/numpy-2.3.5-cp314-cp314-win_arm64.whl", hash = "sha256:aeffcab3d4b43712bb7a60b65f6044d444e75e563ff6180af8f98dd4b905dfd2", size = 10476651, upload-time = "2025-11-16T22:51:47.749Z" }, - { url = "https://files.pythonhosted.org/packages/17/e1/f6a721234ebd4d87084cfa68d081bcba2f5cfe1974f7de4e0e8b9b2a2ba1/numpy-2.3.5-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:17531366a2e3a9e30762c000f2c43a9aaa05728712e25c11ce1dbe700c53ad41", size = 16834503, upload-time = "2025-11-16T22:51:50.443Z" }, - { url = "https://files.pythonhosted.org/packages/5c/1c/baf7ffdc3af9c356e1c135e57ab7cf8d247931b9554f55c467efe2c69eff/numpy-2.3.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = 
"sha256:d21644de1b609825ede2f48be98dfde4656aefc713654eeee280e37cadc4e0ad", size = 12381612, upload-time = "2025-11-16T22:51:53.609Z" }, - { url = "https://files.pythonhosted.org/packages/74/91/f7f0295151407ddc9ba34e699013c32c3c91944f9b35fcf9281163dc1468/numpy-2.3.5-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:c804e3a5aba5460c73955c955bdbd5c08c354954e9270a2c1565f62e866bdc39", size = 5210042, upload-time = "2025-11-16T22:51:56.213Z" }, - { url = "https://files.pythonhosted.org/packages/2e/3b/78aebf345104ec50dd50a4d06ddeb46a9ff5261c33bcc58b1c4f12f85ec2/numpy-2.3.5-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:cc0a57f895b96ec78969c34f682c602bf8da1a0270b09bc65673df2e7638ec20", size = 6724502, upload-time = "2025-11-16T22:51:58.584Z" }, - { url = "https://files.pythonhosted.org/packages/02/c6/7c34b528740512e57ef1b7c8337ab0b4f0bddf34c723b8996c675bc2bc91/numpy-2.3.5-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:900218e456384ea676e24ea6a0417f030a3b07306d29d7ad843957b40a9d8d52", size = 14308962, upload-time = "2025-11-16T22:52:01.698Z" }, - { url = "https://files.pythonhosted.org/packages/80/35/09d433c5262bc32d725bafc619e095b6a6651caf94027a03da624146f655/numpy-2.3.5-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:09a1bea522b25109bf8e6f3027bd810f7c1085c64a0c7ce050c1676ad0ba010b", size = 16655054, upload-time = "2025-11-16T22:52:04.267Z" }, - { url = "https://files.pythonhosted.org/packages/7a/ab/6a7b259703c09a88804fa2430b43d6457b692378f6b74b356155283566ac/numpy-2.3.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:04822c00b5fd0323c8166d66c701dc31b7fbd252c100acd708c48f763968d6a3", size = 16091613, upload-time = "2025-11-16T22:52:08.651Z" }, - { url = "https://files.pythonhosted.org/packages/c2/88/330da2071e8771e60d1038166ff9d73f29da37b01ec3eb43cb1427464e10/numpy-2.3.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d6889ec4ec662a1a37eb4b4fb26b6100841804dac55bd9df579e326cdc146227", size 
= 18591147, upload-time = "2025-11-16T22:52:11.453Z" }, - { url = "https://files.pythonhosted.org/packages/51/41/851c4b4082402d9ea860c3626db5d5df47164a712cb23b54be028b184c1c/numpy-2.3.5-cp314-cp314t-win32.whl", hash = "sha256:93eebbcf1aafdf7e2ddd44c2923e2672e1010bddc014138b229e49725b4d6be5", size = 6479806, upload-time = "2025-11-16T22:52:14.641Z" }, - { url = "https://files.pythonhosted.org/packages/90/30/d48bde1dfd93332fa557cff1972fbc039e055a52021fbef4c2c4b1eefd17/numpy-2.3.5-cp314-cp314t-win_amd64.whl", hash = "sha256:c8a9958e88b65c3b27e22ca2a076311636850b612d6bbfb76e8d156aacde2aaf", size = 13105760, upload-time = "2025-11-16T22:52:17.975Z" }, - { url = "https://files.pythonhosted.org/packages/2d/fd/4b5eb0b3e888d86aee4d198c23acec7d214baaf17ea93c1adec94c9518b9/numpy-2.3.5-cp314-cp314t-win_arm64.whl", hash = "sha256:6203fdf9f3dc5bdaed7319ad8698e685c7a3be10819f41d32a0723e611733b42", size = 10545459, upload-time = "2025-11-16T22:52:20.55Z" }, - { url = "https://files.pythonhosted.org/packages/c6/65/f9dea8e109371ade9c782b4e4756a82edf9d3366bca495d84d79859a0b79/numpy-2.3.5-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:f0963b55cdd70fad460fa4c1341f12f976bb26cb66021a5580329bd498988310", size = 16910689, upload-time = "2025-11-16T22:52:23.247Z" }, - { url = "https://files.pythonhosted.org/packages/00/4f/edb00032a8fb92ec0a679d3830368355da91a69cab6f3e9c21b64d0bb986/numpy-2.3.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:f4255143f5160d0de972d28c8f9665d882b5f61309d8362fdd3e103cf7bf010c", size = 12457053, upload-time = "2025-11-16T22:52:26.367Z" }, - { url = "https://files.pythonhosted.org/packages/16/a4/e8a53b5abd500a63836a29ebe145fc1ab1f2eefe1cfe59276020373ae0aa/numpy-2.3.5-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:a4b9159734b326535f4dd01d947f919c6eefd2d9827466a696c44ced82dfbc18", size = 5285635, upload-time = "2025-11-16T22:52:29.266Z" }, - { url = 
"https://files.pythonhosted.org/packages/a3/2f/37eeb9014d9c8b3e9c55bc599c68263ca44fdbc12a93e45a21d1d56df737/numpy-2.3.5-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:2feae0d2c91d46e59fcd62784a3a83b3fb677fead592ce51b5a6fbb4f95965ff", size = 6801770, upload-time = "2025-11-16T22:52:31.421Z" }, - { url = "https://files.pythonhosted.org/packages/7d/e4/68d2f474df2cb671b2b6c2986a02e520671295647dad82484cde80ca427b/numpy-2.3.5-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ffac52f28a7849ad7576293c0cb7b9f08304e8f7d738a8cb8a90ec4c55a998eb", size = 14391768, upload-time = "2025-11-16T22:52:33.593Z" }, - { url = "https://files.pythonhosted.org/packages/b8/50/94ccd8a2b141cb50651fddd4f6a48874acb3c91c8f0842b08a6afc4b0b21/numpy-2.3.5-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63c0e9e7eea69588479ebf4a8a270d5ac22763cc5854e9a7eae952a3908103f7", size = 16729263, upload-time = "2025-11-16T22:52:36.369Z" }, - { url = "https://files.pythonhosted.org/packages/2d/ee/346fa473e666fe14c52fcdd19ec2424157290a032d4c41f98127bfb31ac7/numpy-2.3.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f16417ec91f12f814b10bafe79ef77e70113a2f5f7018640e7425ff979253425", size = 12967213, upload-time = "2025-11-16T22:52:39.38Z" }, +version = "2.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/57/fd/0005efbd0af48e55eb3c7208af93f2862d4b1a56cd78e84309a2d959208d/numpy-2.4.2.tar.gz", hash = "sha256:659a6107e31a83c4e33f763942275fd278b21d095094044eb35569e86a21ddae", size = 20723651, upload-time = "2026-01-31T23:13:10.135Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d3/44/71852273146957899753e69986246d6a176061ea183407e95418c2aa4d9a/numpy-2.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e7e88598032542bd49af7c4747541422884219056c268823ef6e5e89851c8825", size = 16955478, upload-time = "2026-01-31T23:10:25.623Z" }, + { url = 
"https://files.pythonhosted.org/packages/74/41/5d17d4058bd0cd96bcbd4d9ff0fb2e21f52702aab9a72e4a594efa18692f/numpy-2.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7edc794af8b36ca37ef5fcb5e0d128c7e0595c7b96a2318d1badb6fcd8ee86b1", size = 14965467, upload-time = "2026-01-31T23:10:28.186Z" }, + { url = "https://files.pythonhosted.org/packages/49/48/fb1ce8136c19452ed15f033f8aee91d5defe515094e330ce368a0647846f/numpy-2.4.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:6e9f61981ace1360e42737e2bae58b27bf28a1b27e781721047d84bd754d32e7", size = 5475172, upload-time = "2026-01-31T23:10:30.848Z" }, + { url = "https://files.pythonhosted.org/packages/40/a9/3feb49f17bbd1300dd2570432961f5c8a4ffeff1db6f02c7273bd020a4c9/numpy-2.4.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:cb7bbb88aa74908950d979eeaa24dbdf1a865e3c7e45ff0121d8f70387b55f73", size = 6805145, upload-time = "2026-01-31T23:10:32.352Z" }, + { url = "https://files.pythonhosted.org/packages/3f/39/fdf35cbd6d6e2fcad42fcf85ac04a85a0d0fbfbf34b30721c98d602fd70a/numpy-2.4.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4f069069931240b3fc703f1e23df63443dbd6390614c8c44a87d96cd0ec81eb1", size = 15966084, upload-time = "2026-01-31T23:10:34.502Z" }, + { url = "https://files.pythonhosted.org/packages/1b/46/6fa4ea94f1ddf969b2ee941290cca6f1bfac92b53c76ae5f44afe17ceb69/numpy-2.4.2-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c02ef4401a506fb60b411467ad501e1429a3487abca4664871d9ae0b46c8ba32", size = 16899477, upload-time = "2026-01-31T23:10:37.075Z" }, + { url = "https://files.pythonhosted.org/packages/09/a1/2a424e162b1a14a5bd860a464ab4e07513916a64ab1683fae262f735ccd2/numpy-2.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2653de5c24910e49c2b106499803124dde62a5a1fe0eedeaecf4309a5f639390", size = 17323429, upload-time = "2026-01-31T23:10:39.704Z" }, + { url = 
"https://files.pythonhosted.org/packages/ce/a2/73014149ff250628df72c58204822ac01d768697913881aacf839ff78680/numpy-2.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1ae241bbfc6ae276f94a170b14785e561cb5e7f626b6688cf076af4110887413", size = 18635109, upload-time = "2026-01-31T23:10:41.924Z" }, + { url = "https://files.pythonhosted.org/packages/6c/0c/73e8be2f1accd56df74abc1c5e18527822067dced5ec0861b5bb882c2ce0/numpy-2.4.2-cp311-cp311-win32.whl", hash = "sha256:df1b10187212b198dd45fa943d8985a3c8cf854aed4923796e0e019e113a1bda", size = 6237915, upload-time = "2026-01-31T23:10:45.26Z" }, + { url = "https://files.pythonhosted.org/packages/76/ae/e0265e0163cf127c24c3969d29f1c4c64551a1e375d95a13d32eab25d364/numpy-2.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:b9c618d56a29c9cb1c4da979e9899be7578d2e0b3c24d52079c166324c9e8695", size = 12607972, upload-time = "2026-01-31T23:10:47.021Z" }, + { url = "https://files.pythonhosted.org/packages/29/a5/c43029af9b8014d6ea157f192652c50042e8911f4300f8f6ed3336bf437f/numpy-2.4.2-cp311-cp311-win_arm64.whl", hash = "sha256:47c5a6ed21d9452b10227e5e8a0e1c22979811cad7dcc19d8e3e2fb8fa03f1a3", size = 10485763, upload-time = "2026-01-31T23:10:50.087Z" }, + { url = "https://files.pythonhosted.org/packages/51/6e/6f394c9c77668153e14d4da83bcc247beb5952f6ead7699a1a2992613bea/numpy-2.4.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:21982668592194c609de53ba4933a7471880ccbaadcc52352694a59ecc860b3a", size = 16667963, upload-time = "2026-01-31T23:10:52.147Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f8/55483431f2b2fd015ae6ed4fe62288823ce908437ed49db5a03d15151678/numpy-2.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40397bda92382fcec844066efb11f13e1c9a3e2a8e8f318fb72ed8b6db9f60f1", size = 14693571, upload-time = "2026-01-31T23:10:54.789Z" }, + { url = "https://files.pythonhosted.org/packages/2f/20/18026832b1845cdc82248208dd929ca14c9d8f2bac391f67440707fff27c/numpy-2.4.2-cp312-cp312-macosx_14_0_arm64.whl", hash = 
"sha256:b3a24467af63c67829bfaa61eecf18d5432d4f11992688537be59ecd6ad32f5e", size = 5203469, upload-time = "2026-01-31T23:10:57.343Z" }, + { url = "https://files.pythonhosted.org/packages/7d/33/2eb97c8a77daaba34eaa3fa7241a14ac5f51c46a6bd5911361b644c4a1e2/numpy-2.4.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:805cc8de9fd6e7a22da5aed858e0ab16be5a4db6c873dde1d7451c541553aa27", size = 6550820, upload-time = "2026-01-31T23:10:59.429Z" }, + { url = "https://files.pythonhosted.org/packages/b1/91/b97fdfd12dc75b02c44e26c6638241cc004d4079a0321a69c62f51470c4c/numpy-2.4.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6d82351358ffbcdcd7b686b90742a9b86632d6c1c051016484fa0b326a0a1548", size = 15663067, upload-time = "2026-01-31T23:11:01.291Z" }, + { url = "https://files.pythonhosted.org/packages/f5/c6/a18e59f3f0b8071cc85cbc8d80cd02d68aa9710170b2553a117203d46936/numpy-2.4.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e35d3e0144137d9fdae62912e869136164534d64a169f86438bc9561b6ad49f", size = 16619782, upload-time = "2026-01-31T23:11:03.669Z" }, + { url = "https://files.pythonhosted.org/packages/b7/83/9751502164601a79e18847309f5ceec0b1446d7b6aa12305759b72cf98b2/numpy-2.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:adb6ed2ad29b9e15321d167d152ee909ec73395901b70936f029c3bc6d7f4460", size = 17013128, upload-time = "2026-01-31T23:11:05.913Z" }, + { url = "https://files.pythonhosted.org/packages/61/c4/c4066322256ec740acc1c8923a10047818691d2f8aec254798f3dd90f5f2/numpy-2.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8906e71fd8afcb76580404e2a950caef2685df3d2a57fe82a86ac8d33cc007ba", size = 18345324, upload-time = "2026-01-31T23:11:08.248Z" }, + { url = "https://files.pythonhosted.org/packages/ab/af/6157aa6da728fa4525a755bfad486ae7e3f76d4c1864138003eb84328497/numpy-2.4.2-cp312-cp312-win32.whl", hash = "sha256:ec055f6dae239a6299cace477b479cca2fc125c5675482daf1dd886933a1076f", size = 5960282, 
upload-time = "2026-01-31T23:11:10.497Z" }, + { url = "https://files.pythonhosted.org/packages/92/0f/7ceaaeaacb40567071e94dbf2c9480c0ae453d5bb4f52bea3892c39dc83c/numpy-2.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:209fae046e62d0ce6435fcfe3b1a10537e858249b3d9b05829e2a05218296a85", size = 12314210, upload-time = "2026-01-31T23:11:12.176Z" }, + { url = "https://files.pythonhosted.org/packages/2f/a3/56c5c604fae6dd40fa2ed3040d005fca97e91bd320d232ac9931d77ba13c/numpy-2.4.2-cp312-cp312-win_arm64.whl", hash = "sha256:fbde1b0c6e81d56f5dccd95dd4a711d9b95df1ae4009a60887e56b27e8d903fa", size = 10220171, upload-time = "2026-01-31T23:11:14.684Z" }, + { url = "https://files.pythonhosted.org/packages/a1/22/815b9fe25d1d7ae7d492152adbc7226d3eff731dffc38fe970589fcaaa38/numpy-2.4.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:25f2059807faea4b077a2b6837391b5d830864b3543627f381821c646f31a63c", size = 16663696, upload-time = "2026-01-31T23:11:17.516Z" }, + { url = "https://files.pythonhosted.org/packages/09/f0/817d03a03f93ba9c6c8993de509277d84e69f9453601915e4a69554102a1/numpy-2.4.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bd3a7a9f5847d2fb8c2c6d1c862fa109c31a9abeca1a3c2bd5a64572955b2979", size = 14688322, upload-time = "2026-01-31T23:11:19.883Z" }, + { url = "https://files.pythonhosted.org/packages/da/b4/f805ab79293c728b9a99438775ce51885fd4f31b76178767cfc718701a39/numpy-2.4.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:8e4549f8a3c6d13d55041925e912bfd834285ef1dd64d6bc7d542583355e2e98", size = 5198157, upload-time = "2026-01-31T23:11:22.375Z" }, + { url = "https://files.pythonhosted.org/packages/74/09/826e4289844eccdcd64aac27d13b0fd3f32039915dd5b9ba01baae1f436c/numpy-2.4.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:aea4f66ff44dfddf8c2cffd66ba6538c5ec67d389285292fe428cb2c738c8aef", size = 6546330, upload-time = "2026-01-31T23:11:23.958Z" }, + { url = 
"https://files.pythonhosted.org/packages/19/fb/cbfdbfa3057a10aea5422c558ac57538e6acc87ec1669e666d32ac198da7/numpy-2.4.2-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3cd545784805de05aafe1dde61752ea49a359ccba9760c1e5d1c88a93bbf2b7", size = 15660968, upload-time = "2026-01-31T23:11:25.713Z" }, + { url = "https://files.pythonhosted.org/packages/04/dc/46066ce18d01645541f0186877377b9371b8fa8017fa8262002b4ef22612/numpy-2.4.2-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0d9b7c93578baafcbc5f0b83eaf17b79d345c6f36917ba0c67f45226911d499", size = 16607311, upload-time = "2026-01-31T23:11:28.117Z" }, + { url = "https://files.pythonhosted.org/packages/14/d9/4b5adfc39a43fa6bf918c6d544bc60c05236cc2f6339847fc5b35e6cb5b0/numpy-2.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f74f0f7779cc7ae07d1810aab8ac6b1464c3eafb9e283a40da7309d5e6e48fbb", size = 17012850, upload-time = "2026-01-31T23:11:30.888Z" }, + { url = "https://files.pythonhosted.org/packages/b7/20/adb6e6adde6d0130046e6fdfb7675cc62bc2f6b7b02239a09eb58435753d/numpy-2.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c7ac672d699bf36275c035e16b65539931347d68b70667d28984c9fb34e07fa7", size = 18334210, upload-time = "2026-01-31T23:11:33.214Z" }, + { url = "https://files.pythonhosted.org/packages/78/0e/0a73b3dff26803a8c02baa76398015ea2a5434d9b8265a7898a6028c1591/numpy-2.4.2-cp313-cp313-win32.whl", hash = "sha256:8e9afaeb0beff068b4d9cd20d322ba0ee1cecfb0b08db145e4ab4dd44a6b5110", size = 5958199, upload-time = "2026-01-31T23:11:35.385Z" }, + { url = "https://files.pythonhosted.org/packages/43/bc/6352f343522fcb2c04dbaf94cb30cca6fd32c1a750c06ad6231b4293708c/numpy-2.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:7df2de1e4fba69a51c06c28f5a3de36731eb9639feb8e1cf7e4a7b0daf4cf622", size = 12310848, upload-time = "2026-01-31T23:11:38.001Z" }, + { url = 
"https://files.pythonhosted.org/packages/6e/8d/6da186483e308da5da1cc6918ce913dcfe14ffde98e710bfeff2a6158d4e/numpy-2.4.2-cp313-cp313-win_arm64.whl", hash = "sha256:0fece1d1f0a89c16b03442eae5c56dc0be0c7883b5d388e0c03f53019a4bfd71", size = 10221082, upload-time = "2026-01-31T23:11:40.392Z" }, + { url = "https://files.pythonhosted.org/packages/25/a1/9510aa43555b44781968935c7548a8926274f815de42ad3997e9e83680dd/numpy-2.4.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5633c0da313330fd20c484c78cdd3f9b175b55e1a766c4a174230c6b70ad8262", size = 14815866, upload-time = "2026-01-31T23:11:42.495Z" }, + { url = "https://files.pythonhosted.org/packages/36/30/6bbb5e76631a5ae46e7923dd16ca9d3f1c93cfa8d4ed79a129814a9d8db3/numpy-2.4.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d9f64d786b3b1dd742c946c42d15b07497ed14af1a1f3ce840cce27daa0ce913", size = 5325631, upload-time = "2026-01-31T23:11:44.7Z" }, + { url = "https://files.pythonhosted.org/packages/46/00/3a490938800c1923b567b3a15cd17896e68052e2145d8662aaf3e1ffc58f/numpy-2.4.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:b21041e8cb6a1eb5312dd1d2f80a94d91efffb7a06b70597d44f1bd2dfc315ab", size = 6646254, upload-time = "2026-01-31T23:11:46.341Z" }, + { url = "https://files.pythonhosted.org/packages/d3/e9/fac0890149898a9b609caa5af7455a948b544746e4b8fe7c212c8edd71f8/numpy-2.4.2-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:00ab83c56211a1d7c07c25e3217ea6695e50a3e2f255053686b081dc0b091a82", size = 15720138, upload-time = "2026-01-31T23:11:48.082Z" }, + { url = "https://files.pythonhosted.org/packages/ea/5c/08887c54e68e1e28df53709f1893ce92932cc6f01f7c3d4dc952f61ffd4e/numpy-2.4.2-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2fb882da679409066b4603579619341c6d6898fc83a8995199d5249f986e8e8f", size = 16655398, upload-time = "2026-01-31T23:11:50.293Z" }, + { url = 
"https://files.pythonhosted.org/packages/4d/89/253db0fa0e66e9129c745e4ef25631dc37d5f1314dad2b53e907b8538e6d/numpy-2.4.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:66cb9422236317f9d44b67b4d18f44efe6e9c7f8794ac0462978513359461554", size = 17079064, upload-time = "2026-01-31T23:11:52.927Z" }, + { url = "https://files.pythonhosted.org/packages/2a/d5/cbade46ce97c59c6c3da525e8d95b7abe8a42974a1dc5c1d489c10433e88/numpy-2.4.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:0f01dcf33e73d80bd8dc0f20a71303abbafa26a19e23f6b68d1aa9990af90257", size = 18379680, upload-time = "2026-01-31T23:11:55.22Z" }, + { url = "https://files.pythonhosted.org/packages/40/62/48f99ae172a4b63d981babe683685030e8a3df4f246c893ea5c6ef99f018/numpy-2.4.2-cp313-cp313t-win32.whl", hash = "sha256:52b913ec40ff7ae845687b0b34d8d93b60cb66dcee06996dd5c99f2fc9328657", size = 6082433, upload-time = "2026-01-31T23:11:58.096Z" }, + { url = "https://files.pythonhosted.org/packages/07/38/e054a61cfe48ad9f1ed0d188e78b7e26859d0b60ef21cd9de4897cdb5326/numpy-2.4.2-cp313-cp313t-win_amd64.whl", hash = "sha256:5eea80d908b2c1f91486eb95b3fb6fab187e569ec9752ab7d9333d2e66bf2d6b", size = 12451181, upload-time = "2026-01-31T23:11:59.782Z" }, + { url = "https://files.pythonhosted.org/packages/6e/a4/a05c3a6418575e185dd84d0b9680b6bb2e2dc3e4202f036b7b4e22d6e9dc/numpy-2.4.2-cp313-cp313t-win_arm64.whl", hash = "sha256:fd49860271d52127d61197bb50b64f58454e9f578cb4b2c001a6de8b1f50b0b1", size = 10290756, upload-time = "2026-01-31T23:12:02.438Z" }, + { url = "https://files.pythonhosted.org/packages/18/88/b7df6050bf18fdcfb7046286c6535cabbdd2064a3440fca3f069d319c16e/numpy-2.4.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:444be170853f1f9d528428eceb55f12918e4fda5d8805480f36a002f1415e09b", size = 16663092, upload-time = "2026-01-31T23:12:04.521Z" }, + { url = "https://files.pythonhosted.org/packages/25/7a/1fee4329abc705a469a4afe6e69b1ef7e915117747886327104a8493a955/numpy-2.4.2-cp314-cp314-macosx_11_0_arm64.whl", hash 
= "sha256:d1240d50adff70c2a88217698ca844723068533f3f5c5fa6ee2e3220e3bdb000", size = 14698770, upload-time = "2026-01-31T23:12:06.96Z" }, + { url = "https://files.pythonhosted.org/packages/fb/0b/f9e49ba6c923678ad5bc38181c08ac5e53b7a5754dbca8e581aa1a56b1ff/numpy-2.4.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:7cdde6de52fb6664b00b056341265441192d1291c130e99183ec0d4b110ff8b1", size = 5208562, upload-time = "2026-01-31T23:12:09.632Z" }, + { url = "https://files.pythonhosted.org/packages/7d/12/d7de8f6f53f9bb76997e5e4c069eda2051e3fe134e9181671c4391677bb2/numpy-2.4.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:cda077c2e5b780200b6b3e09d0b42205a3d1c68f30c6dceb90401c13bff8fe74", size = 6543710, upload-time = "2026-01-31T23:12:11.969Z" }, + { url = "https://files.pythonhosted.org/packages/09/63/c66418c2e0268a31a4cf8a8b512685748200f8e8e8ec6c507ce14e773529/numpy-2.4.2-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d30291931c915b2ab5717c2974bb95ee891a1cf22ebc16a8006bd59cd210d40a", size = 15677205, upload-time = "2026-01-31T23:12:14.33Z" }, + { url = "https://files.pythonhosted.org/packages/5d/6c/7f237821c9642fb2a04d2f1e88b4295677144ca93285fd76eff3bcba858d/numpy-2.4.2-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bba37bc29d4d85761deed3954a1bc62be7cf462b9510b51d367b769a8c8df325", size = 16611738, upload-time = "2026-01-31T23:12:16.525Z" }, + { url = "https://files.pythonhosted.org/packages/c2/a7/39c4cdda9f019b609b5c473899d87abff092fc908cfe4d1ecb2fcff453b0/numpy-2.4.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b2f0073ed0868db1dcd86e052d37279eef185b9c8db5bf61f30f46adac63c909", size = 17028888, upload-time = "2026-01-31T23:12:19.306Z" }, + { url = "https://files.pythonhosted.org/packages/da/b3/e84bb64bdfea967cc10950d71090ec2d84b49bc691df0025dddb7c26e8e3/numpy-2.4.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:7f54844851cdb630ceb623dcec4db3240d1ac13d4990532446761baede94996a", size = 
18339556, upload-time = "2026-01-31T23:12:21.816Z" }, + { url = "https://files.pythonhosted.org/packages/88/f5/954a291bc1192a27081706862ac62bb5920fbecfbaa302f64682aa90beed/numpy-2.4.2-cp314-cp314-win32.whl", hash = "sha256:12e26134a0331d8dbd9351620f037ec470b7c75929cb8a1537f6bfe411152a1a", size = 6006899, upload-time = "2026-01-31T23:12:24.14Z" }, + { url = "https://files.pythonhosted.org/packages/05/cb/eff72a91b2efdd1bc98b3b8759f6a1654aa87612fc86e3d87d6fe4f948c4/numpy-2.4.2-cp314-cp314-win_amd64.whl", hash = "sha256:068cdb2d0d644cdb45670810894f6a0600797a69c05f1ac478e8d31670b8ee75", size = 12443072, upload-time = "2026-01-31T23:12:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/37/75/62726948db36a56428fce4ba80a115716dc4fad6a3a4352487f8bb950966/numpy-2.4.2-cp314-cp314-win_arm64.whl", hash = "sha256:6ed0be1ee58eef41231a5c943d7d1375f093142702d5723ca2eb07db9b934b05", size = 10494886, upload-time = "2026-01-31T23:12:28.488Z" }, + { url = "https://files.pythonhosted.org/packages/36/2f/ee93744f1e0661dc267e4b21940870cabfae187c092e1433b77b09b50ac4/numpy-2.4.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:98f16a80e917003a12c0580f97b5f875853ebc33e2eaa4bccfc8201ac6869308", size = 14818567, upload-time = "2026-01-31T23:12:30.709Z" }, + { url = "https://files.pythonhosted.org/packages/a7/24/6535212add7d76ff938d8bdc654f53f88d35cddedf807a599e180dcb8e66/numpy-2.4.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:20abd069b9cda45874498b245c8015b18ace6de8546bf50dfa8cea1696ed06ef", size = 5328372, upload-time = "2026-01-31T23:12:32.962Z" }, + { url = "https://files.pythonhosted.org/packages/5e/9d/c48f0a035725f925634bf6b8994253b43f2047f6778a54147d7e213bc5a7/numpy-2.4.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:e98c97502435b53741540a5717a6749ac2ada901056c7db951d33e11c885cc7d", size = 6649306, upload-time = "2026-01-31T23:12:34.797Z" }, + { url = 
"https://files.pythonhosted.org/packages/81/05/7c73a9574cd4a53a25907bad38b59ac83919c0ddc8234ec157f344d57d9a/numpy-2.4.2-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:da6cad4e82cb893db4b69105c604d805e0c3ce11501a55b5e9f9083b47d2ffe8", size = 15722394, upload-time = "2026-01-31T23:12:36.565Z" }, + { url = "https://files.pythonhosted.org/packages/35/fa/4de10089f21fc7d18442c4a767ab156b25c2a6eaf187c0db6d9ecdaeb43f/numpy-2.4.2-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e4424677ce4b47fe73c8b5556d876571f7c6945d264201180db2dc34f676ab5", size = 16653343, upload-time = "2026-01-31T23:12:39.188Z" }, + { url = "https://files.pythonhosted.org/packages/b8/f9/d33e4ffc857f3763a57aa85650f2e82486832d7492280ac21ba9efda80da/numpy-2.4.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:2b8f157c8a6f20eb657e240f8985cc135598b2b46985c5bccbde7616dc9c6b1e", size = 17078045, upload-time = "2026-01-31T23:12:42.041Z" }, + { url = "https://files.pythonhosted.org/packages/c8/b8/54bdb43b6225badbea6389fa038c4ef868c44f5890f95dd530a218706da3/numpy-2.4.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5daf6f3914a733336dab21a05cdec343144600e964d2fcdabaac0c0269874b2a", size = 18380024, upload-time = "2026-01-31T23:12:44.331Z" }, + { url = "https://files.pythonhosted.org/packages/a5/55/6e1a61ded7af8df04016d81b5b02daa59f2ea9252ee0397cb9f631efe9e5/numpy-2.4.2-cp314-cp314t-win32.whl", hash = "sha256:8c50dd1fc8826f5b26a5ee4d77ca55d88a895f4e4819c7ecc2a9f5905047a443", size = 6153937, upload-time = "2026-01-31T23:12:47.229Z" }, + { url = "https://files.pythonhosted.org/packages/45/aa/fa6118d1ed6d776b0983f3ceac9b1a5558e80df9365b1c3aa6d42bf9eee4/numpy-2.4.2-cp314-cp314t-win_amd64.whl", hash = "sha256:fcf92bee92742edd401ba41135185866f7026c502617f422eb432cfeca4fe236", size = 12631844, upload-time = "2026-01-31T23:12:48.997Z" }, + { url = 
"https://files.pythonhosted.org/packages/32/0a/2ec5deea6dcd158f254a7b372fb09cfba5719419c8d66343bab35237b3fb/numpy-2.4.2-cp314-cp314t-win_arm64.whl", hash = "sha256:1f92f53998a17265194018d1cc321b2e96e900ca52d54c7c77837b71b9465181", size = 10565379, upload-time = "2026-01-31T23:12:51.345Z" }, + { url = "https://files.pythonhosted.org/packages/f4/f8/50e14d36d915ef64d8f8bc4a087fc8264d82c785eda6711f80ab7e620335/numpy-2.4.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:89f7268c009bc492f506abd6f5265defa7cb3f7487dc21d357c3d290add45082", size = 16833179, upload-time = "2026-01-31T23:12:53.5Z" }, + { url = "https://files.pythonhosted.org/packages/17/17/809b5cad63812058a8189e91a1e2d55a5a18fd04611dbad244e8aeae465c/numpy-2.4.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:e6dee3bb76aa4009d5a912180bf5b2de012532998d094acee25d9cb8dee3e44a", size = 14889755, upload-time = "2026-01-31T23:12:55.933Z" }, + { url = "https://files.pythonhosted.org/packages/3e/ea/181b9bcf7627fc8371720316c24db888dcb9829b1c0270abf3d288b2e29b/numpy-2.4.2-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:cd2bd2bbed13e213d6b55dc1d035a4f91748a7d3edc9480c13898b0353708920", size = 5399500, upload-time = "2026-01-31T23:12:58.671Z" }, + { url = "https://files.pythonhosted.org/packages/33/9f/413adf3fc955541ff5536b78fcf0754680b3c6d95103230252a2c9408d23/numpy-2.4.2-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:cf28c0c1d4c4bf00f509fa7eb02c58d7caf221b50b467bcb0d9bbf1584d5c821", size = 6714252, upload-time = "2026-01-31T23:13:00.518Z" }, + { url = "https://files.pythonhosted.org/packages/91/da/643aad274e29ccbdf42ecd94dafe524b81c87bcb56b83872d54827f10543/numpy-2.4.2-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e04ae107ac591763a47398bb45b568fc38f02dbc4aa44c063f67a131f99346cb", size = 15797142, upload-time = "2026-01-31T23:13:02.219Z" }, + { url = 
"https://files.pythonhosted.org/packages/66/27/965b8525e9cb5dc16481b30a1b3c21e50c7ebf6e9dbd48d0c4d0d5089c7e/numpy-2.4.2-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:602f65afdef699cda27ec0b9224ae5dc43e328f4c24c689deaf77133dbee74d0", size = 16727979, upload-time = "2026-01-31T23:13:04.62Z" }, + { url = "https://files.pythonhosted.org/packages/de/e5/b7d20451657664b07986c2f6e3be564433f5dcaf3482d68eaecd79afaf03/numpy-2.4.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be71bf1edb48ebbbf7f6337b5bfd2f895d1902f6335a5830b20141fc126ffba0", size = 12502577, upload-time = "2026-01-31T23:13:07.08Z" }, ] [[package]] @@ -1802,7 +1709,7 @@ wheels = [ [[package]] name = "openai" -version = "2.8.1" +version = "2.16.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1814,91 +1721,173 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d5/e4/42591e356f1d53c568418dc7e30dcda7be31dd5a4d570bca22acb0525862/openai-2.8.1.tar.gz", hash = "sha256:cb1b79eef6e809f6da326a7ef6038719e35aa944c42d081807bfa1be8060f15f", size = 602490, upload-time = "2025-11-17T22:39:59.549Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b1/6c/e4c964fcf1d527fdf4739e7cc940c60075a4114d50d03871d5d5b1e13a88/openai-2.16.0.tar.gz", hash = "sha256:42eaa22ca0d8ded4367a77374104d7a2feafee5bd60a107c3c11b5243a11cd12", size = 629649, upload-time = "2026-01-27T23:28:02.579Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/16/83/0315bf2cfd75a2ce8a7e54188e9456c60cec6c0cf66728ed07bd9859ff26/openai-2.16.0-py3-none-any.whl", hash = "sha256:5f46643a8f42899a84e80c38838135d7038e7718333ce61396994f887b09a59b", size = 1068612, upload-time = "2026-01-27T23:28:00.356Z" }, +] + +[[package]] +name = "opentelemetry-api" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata" }, + { name = 
"typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/08/d8/0f354c375628e048bd0570645b310797299754730079853095bf000fba69/opentelemetry_api-1.38.0.tar.gz", hash = "sha256:f4c193b5e8acb0912b06ac5b16321908dd0843d75049c091487322284a3eea12", size = 65242, upload-time = "2025-10-16T08:35:50.25Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/55/4f/dbc0c124c40cb390508a82770fb9f6e3ed162560181a85089191a851c59a/openai-2.8.1-py3-none-any.whl", hash = "sha256:c6c3b5a04994734386e8dad3c00a393f56d3b68a27cd2e8acae91a59e4122463", size = 1022688, upload-time = "2025-11-17T22:39:57.675Z" }, + { url = "https://files.pythonhosted.org/packages/ae/a2/d86e01c28300bd41bab8f18afd613676e2bd63515417b77636fc1add426f/opentelemetry_api-1.38.0-py3-none-any.whl", hash = "sha256:2891b0197f47124454ab9f0cf58f3be33faca394457ac3e09daba13ff50aa582", size = 65947, upload-time = "2025-10-16T08:35:30.23Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-proto" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/83/dd4660f2956ff88ed071e9e0e36e830df14b8c5dc06722dbde1841accbe8/opentelemetry_exporter_otlp_proto_common-1.38.0.tar.gz", hash = "sha256:e333278afab4695aa8114eeb7bf4e44e65c6607d54968271a249c180b2cb605c", size = 20431, upload-time = "2025-10-16T08:35:53.285Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/9e/55a41c9601191e8cd8eb626b54ee6827b9c9d4a46d736f32abc80d8039fc/opentelemetry_exporter_otlp_proto_common-1.38.0-py3-none-any.whl", hash = "sha256:03cb76ab213300fe4f4c62b7d8f17d97fcfd21b89f0b5ce38ea156327ddda74a", size = 18359, upload-time = "2025-10-16T08:35:34.099Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-http" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "googleapis-common-protos" }, + { name = 
"opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, + { name = "requests" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/81/0a/debcdfb029fbd1ccd1563f7c287b89a6f7bef3b2902ade56797bfd020854/opentelemetry_exporter_otlp_proto_http-1.38.0.tar.gz", hash = "sha256:f16bd44baf15cbe07633c5112ffc68229d0edbeac7b37610be0b2def4e21e90b", size = 17282, upload-time = "2025-10-16T08:35:54.422Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/77/154004c99fb9f291f74aa0822a2f5bbf565a72d8126b3a1b63ed8e5f83c7/opentelemetry_exporter_otlp_proto_http-1.38.0-py3-none-any.whl", hash = "sha256:84b937305edfc563f08ec69b9cb2298be8188371217e867c1854d77198d0825b", size = 19579, upload-time = "2025-10-16T08:35:36.269Z" }, +] + +[[package]] +name = "opentelemetry-proto" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/51/14/f0c4f0f6371b9cb7f9fa9ee8918bfd59ac7040c7791f1e6da32a1839780d/opentelemetry_proto-1.38.0.tar.gz", hash = "sha256:88b161e89d9d372ce723da289b7da74c3a8354a8e5359992be813942969ed468", size = 46152, upload-time = "2025-10-16T08:36:01.612Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b6/6a/82b68b14efca5150b2632f3692d627afa76b77378c4999f2648979409528/opentelemetry_proto-1.38.0-py3-none-any.whl", hash = "sha256:b6ebe54d3217c42e45462e2a1ae28c3e2bf2ec5a5645236a490f55f45f1a0a18", size = 72535, upload-time = "2025-10-16T08:35:45.749Z" }, +] + +[[package]] +name = "opentelemetry-sdk" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "typing-extensions" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/85/cb/f0eee1445161faf4c9af3ba7b848cc22a50a3d3e2515051ad8628c35ff80/opentelemetry_sdk-1.38.0.tar.gz", hash = "sha256:93df5d4d871ed09cb4272305be4d996236eedb232253e3ab864c8620f051cebe", size = 171942, upload-time = "2025-10-16T08:36:02.257Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2f/2e/e93777a95d7d9c40d270a371392b6d6f1ff170c2a3cb32d6176741b5b723/opentelemetry_sdk-1.38.0-py3-none-any.whl", hash = "sha256:1c66af6564ecc1553d72d811a01df063ff097cdc82ce188da9951f93b8d10f6b", size = 132349, upload-time = "2025-10-16T08:35:46.995Z" }, +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.59b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/40/bc/8b9ad3802cd8ac6583a4eb7de7e5d7db004e89cb7efe7008f9c8a537ee75/opentelemetry_semantic_conventions-0.59b0.tar.gz", hash = "sha256:7a6db3f30d70202d5bf9fa4b69bc866ca6a30437287de6c510fb594878aed6b0", size = 129861, upload-time = "2025-10-16T08:36:03.346Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/24/7d/c88d7b15ba8fe5c6b8f93be50fc11795e9fc05386c44afaf6b76fe191f9b/opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed", size = 207954, upload-time = "2025-10-16T08:35:48.054Z" }, ] [[package]] name = "orjson" -version = "3.11.4" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c6/fe/ed708782d6709cc60eb4c2d8a361a440661f74134675c72990f2c48c785f/orjson-3.11.4.tar.gz", hash = "sha256:39485f4ab4c9b30a3943cfe99e1a213c4776fb69e8abd68f66b83d5a0b0fdc6d", size = 5945188, upload-time = "2025-10-24T15:50:38.027Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/63/1d/1ea6005fffb56715fd48f632611e163d1604e8316a5bad2288bee9a1c9eb/orjson-3.11.4-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5e59d23cd93ada23ec59a96f215139753fbfe3a4d989549bcb390f8c00370b39", size = 243498, upload-time = "2025-10-24T15:48:48.101Z" }, - { url = "https://files.pythonhosted.org/packages/37/d7/ffed10c7da677f2a9da307d491b9eb1d0125b0307019c4ad3d665fd31f4f/orjson-3.11.4-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:5c3aedecfc1beb988c27c79d52ebefab93b6c3921dbec361167e6559aba2d36d", size = 128961, upload-time = "2025-10-24T15:48:49.571Z" }, - { url = "https://files.pythonhosted.org/packages/a2/96/3e4d10a18866d1368f73c8c44b7fe37cc8a15c32f2a7620be3877d4c55a3/orjson-3.11.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da9e5301f1c2caa2a9a4a303480d79c9ad73560b2e7761de742ab39fe59d9175", size = 130321, upload-time = "2025-10-24T15:48:50.713Z" }, - { url = "https://files.pythonhosted.org/packages/eb/1f/465f66e93f434f968dd74d5b623eb62c657bdba2332f5a8be9f118bb74c7/orjson-3.11.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8873812c164a90a79f65368f8f96817e59e35d0cc02786a5356f0e2abed78040", size = 129207, upload-time = "2025-10-24T15:48:52.193Z" }, - { url = "https://files.pythonhosted.org/packages/28/43/d1e94837543321c119dff277ae8e348562fe8c0fafbb648ef7cb0c67e521/orjson-3.11.4-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5d7feb0741ebb15204e748f26c9638e6665a5fa93c37a2c73d64f1669b0ddc63", size = 136323, upload-time = "2025-10-24T15:48:54.806Z" }, - { url = "https://files.pythonhosted.org/packages/bf/04/93303776c8890e422a5847dd012b4853cdd88206b8bbd3edc292c90102d1/orjson-3.11.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01ee5487fefee21e6910da4c2ee9eef005bee568a0879834df86f888d2ffbdd9", size = 137440, upload-time = "2025-10-24T15:48:56.326Z" }, - { url = 
"https://files.pythonhosted.org/packages/1e/ef/75519d039e5ae6b0f34d0336854d55544ba903e21bf56c83adc51cd8bf82/orjson-3.11.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d40d46f348c0321df01507f92b95a377240c4ec31985225a6668f10e2676f9a", size = 136680, upload-time = "2025-10-24T15:48:57.476Z" }, - { url = "https://files.pythonhosted.org/packages/b5/18/bf8581eaae0b941b44efe14fee7b7862c3382fbc9a0842132cfc7cf5ecf4/orjson-3.11.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95713e5fc8af84d8edc75b785d2386f653b63d62b16d681687746734b4dfc0be", size = 136160, upload-time = "2025-10-24T15:48:59.631Z" }, - { url = "https://files.pythonhosted.org/packages/c4/35/a6d582766d351f87fc0a22ad740a641b0a8e6fc47515e8614d2e4790ae10/orjson-3.11.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ad73ede24f9083614d6c4ca9a85fe70e33be7bf047ec586ee2363bc7418fe4d7", size = 140318, upload-time = "2025-10-24T15:49:00.834Z" }, - { url = "https://files.pythonhosted.org/packages/76/b3/5a4801803ab2e2e2d703bce1a56540d9f99a9143fbec7bf63d225044fef8/orjson-3.11.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:842289889de515421f3f224ef9c1f1efb199a32d76d8d2ca2706fa8afe749549", size = 406330, upload-time = "2025-10-24T15:49:02.327Z" }, - { url = "https://files.pythonhosted.org/packages/80/55/a8f682f64833e3a649f620eafefee175cbfeb9854fc5b710b90c3bca45df/orjson-3.11.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3b2427ed5791619851c52a1261b45c233930977e7de8cf36de05636c708fa905", size = 149580, upload-time = "2025-10-24T15:49:03.517Z" }, - { url = "https://files.pythonhosted.org/packages/ad/e4/c132fa0c67afbb3eb88274fa98df9ac1f631a675e7877037c611805a4413/orjson-3.11.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3c36e524af1d29982e9b190573677ea02781456b2e537d5840e4538a5ec41907", size = 139846, upload-time = "2025-10-24T15:49:04.761Z" }, - { url = 
"https://files.pythonhosted.org/packages/54/06/dc3491489efd651fef99c5908e13951abd1aead1257c67f16135f95ce209/orjson-3.11.4-cp311-cp311-win32.whl", hash = "sha256:87255b88756eab4a68ec61837ca754e5d10fa8bc47dc57f75cedfeaec358d54c", size = 135781, upload-time = "2025-10-24T15:49:05.969Z" }, - { url = "https://files.pythonhosted.org/packages/79/b7/5e5e8d77bd4ea02a6ac54c42c818afb01dd31961be8a574eb79f1d2cfb1e/orjson-3.11.4-cp311-cp311-win_amd64.whl", hash = "sha256:e2d5d5d798aba9a0e1fede8d853fa899ce2cb930ec0857365f700dffc2c7af6a", size = 131391, upload-time = "2025-10-24T15:49:07.355Z" }, - { url = "https://files.pythonhosted.org/packages/0f/dc/9484127cc1aa213be398ed735f5f270eedcb0c0977303a6f6ddc46b60204/orjson-3.11.4-cp311-cp311-win_arm64.whl", hash = "sha256:6bb6bb41b14c95d4f2702bce9975fda4516f1db48e500102fc4d8119032ff045", size = 126252, upload-time = "2025-10-24T15:49:08.869Z" }, - { url = "https://files.pythonhosted.org/packages/63/51/6b556192a04595b93e277a9ff71cd0cc06c21a7df98bcce5963fa0f5e36f/orjson-3.11.4-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d4371de39319d05d3f482f372720b841c841b52f5385bd99c61ed69d55d9ab50", size = 243571, upload-time = "2025-10-24T15:49:10.008Z" }, - { url = "https://files.pythonhosted.org/packages/1c/2c/2602392ddf2601d538ff11848b98621cd465d1a1ceb9db9e8043181f2f7b/orjson-3.11.4-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:e41fd3b3cac850eaae78232f37325ed7d7436e11c471246b87b2cd294ec94853", size = 128891, upload-time = "2025-10-24T15:49:11.297Z" }, - { url = "https://files.pythonhosted.org/packages/4e/47/bf85dcf95f7a3a12bf223394a4f849430acd82633848d52def09fa3f46ad/orjson-3.11.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:600e0e9ca042878c7fdf189cf1b028fe2c1418cc9195f6cb9824eb6ed99cb938", size = 130137, upload-time = "2025-10-24T15:49:12.544Z" }, - { url = 
"https://files.pythonhosted.org/packages/b4/4d/a0cb31007f3ab6f1fd2a1b17057c7c349bc2baf8921a85c0180cc7be8011/orjson-3.11.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7bbf9b333f1568ef5da42bc96e18bf30fd7f8d54e9ae066d711056add508e415", size = 129152, upload-time = "2025-10-24T15:49:13.754Z" }, - { url = "https://files.pythonhosted.org/packages/f7/ef/2811def7ce3d8576b19e3929fff8f8f0d44bc5eb2e0fdecb2e6e6cc6c720/orjson-3.11.4-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4806363144bb6e7297b8e95870e78d30a649fdc4e23fc84daa80c8ebd366ce44", size = 136834, upload-time = "2025-10-24T15:49:15.307Z" }, - { url = "https://files.pythonhosted.org/packages/00/d4/9aee9e54f1809cec8ed5abd9bc31e8a9631d19460e3b8470145d25140106/orjson-3.11.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad355e8308493f527d41154e9053b86a5be892b3b359a5c6d5d95cda23601cb2", size = 137519, upload-time = "2025-10-24T15:49:16.557Z" }, - { url = "https://files.pythonhosted.org/packages/db/ea/67bfdb5465d5679e8ae8d68c11753aaf4f47e3e7264bad66dc2f2249e643/orjson-3.11.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a7517482667fb9f0ff1b2f16fe5829296ed7a655d04d68cd9711a4d8a4e708", size = 136749, upload-time = "2025-10-24T15:49:17.796Z" }, - { url = "https://files.pythonhosted.org/packages/01/7e/62517dddcfce6d53a39543cd74d0dccfcbdf53967017c58af68822100272/orjson-3.11.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97eb5942c7395a171cbfecc4ef6701fc3c403e762194683772df4c54cfbb2210", size = 136325, upload-time = "2025-10-24T15:49:19.347Z" }, - { url = "https://files.pythonhosted.org/packages/18/ae/40516739f99ab4c7ec3aaa5cc242d341fcb03a45d89edeeaabc5f69cb2cf/orjson-3.11.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:149d95d5e018bdd822e3f38c103b1a7c91f88d38a88aada5c4e9b3a73a244241", size = 140204, upload-time = "2025-10-24T15:49:20.545Z" }, - { url = 
"https://files.pythonhosted.org/packages/82/18/ff5734365623a8916e3a4037fcef1cd1782bfc14cf0992afe7940c5320bf/orjson-3.11.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:624f3951181eb46fc47dea3d221554e98784c823e7069edb5dbd0dc826ac909b", size = 406242, upload-time = "2025-10-24T15:49:21.884Z" }, - { url = "https://files.pythonhosted.org/packages/e1/43/96436041f0a0c8c8deca6a05ebeaf529bf1de04839f93ac5e7c479807aec/orjson-3.11.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:03bfa548cf35e3f8b3a96c4e8e41f753c686ff3d8e182ce275b1751deddab58c", size = 150013, upload-time = "2025-10-24T15:49:23.185Z" }, - { url = "https://files.pythonhosted.org/packages/1b/48/78302d98423ed8780479a1e682b9aecb869e8404545d999d34fa486e573e/orjson-3.11.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:525021896afef44a68148f6ed8a8bf8375553d6066c7f48537657f64823565b9", size = 139951, upload-time = "2025-10-24T15:49:24.428Z" }, - { url = "https://files.pythonhosted.org/packages/4a/7b/ad613fdcdaa812f075ec0875143c3d37f8654457d2af17703905425981bf/orjson-3.11.4-cp312-cp312-win32.whl", hash = "sha256:b58430396687ce0f7d9eeb3dd47761ca7d8fda8e9eb92b3077a7a353a75efefa", size = 136049, upload-time = "2025-10-24T15:49:25.973Z" }, - { url = "https://files.pythonhosted.org/packages/b9/3c/9cf47c3ff5f39b8350fb21ba65d789b6a1129d4cbb3033ba36c8a9023520/orjson-3.11.4-cp312-cp312-win_amd64.whl", hash = "sha256:c6dbf422894e1e3c80a177133c0dda260f81428f9de16d61041949f6a2e5c140", size = 131461, upload-time = "2025-10-24T15:49:27.259Z" }, - { url = "https://files.pythonhosted.org/packages/c6/3b/e2425f61e5825dc5b08c2a5a2b3af387eaaca22a12b9c8c01504f8614c36/orjson-3.11.4-cp312-cp312-win_arm64.whl", hash = "sha256:d38d2bc06d6415852224fcc9c0bfa834c25431e466dc319f0edd56cca81aa96e", size = 126167, upload-time = "2025-10-24T15:49:28.511Z" }, - { url = 
"https://files.pythonhosted.org/packages/23/15/c52aa7112006b0f3d6180386c3a46ae057f932ab3425bc6f6ac50431cca1/orjson-3.11.4-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:2d6737d0e616a6e053c8b4acc9eccea6b6cce078533666f32d140e4f85002534", size = 243525, upload-time = "2025-10-24T15:49:29.737Z" }, - { url = "https://files.pythonhosted.org/packages/ec/38/05340734c33b933fd114f161f25a04e651b0c7c33ab95e9416ade5cb44b8/orjson-3.11.4-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:afb14052690aa328cc118a8e09f07c651d301a72e44920b887c519b313d892ff", size = 128871, upload-time = "2025-10-24T15:49:31.109Z" }, - { url = "https://files.pythonhosted.org/packages/55/b9/ae8d34899ff0c012039b5a7cb96a389b2476e917733294e498586b45472d/orjson-3.11.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38aa9e65c591febb1b0aed8da4d469eba239d434c218562df179885c94e1a3ad", size = 130055, upload-time = "2025-10-24T15:49:33.382Z" }, - { url = "https://files.pythonhosted.org/packages/33/aa/6346dd5073730451bee3681d901e3c337e7ec17342fb79659ec9794fc023/orjson-3.11.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f2cf4dfaf9163b0728d061bebc1e08631875c51cd30bf47cb9e3293bfbd7dcd5", size = 129061, upload-time = "2025-10-24T15:49:34.935Z" }, - { url = "https://files.pythonhosted.org/packages/39/e4/8eea51598f66a6c853c380979912d17ec510e8e66b280d968602e680b942/orjson-3.11.4-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:89216ff3dfdde0e4070932e126320a1752c9d9a758d6a32ec54b3b9334991a6a", size = 136541, upload-time = "2025-10-24T15:49:36.923Z" }, - { url = "https://files.pythonhosted.org/packages/9a/47/cb8c654fa9adcc60e99580e17c32b9e633290e6239a99efa6b885aba9dbc/orjson-3.11.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9daa26ca8e97fae0ce8aa5d80606ef8f7914e9b129b6b5df9104266f764ce436", size = 137535, upload-time = "2025-10-24T15:49:38.307Z" }, - { url = 
"https://files.pythonhosted.org/packages/43/92/04b8cc5c2b729f3437ee013ce14a60ab3d3001465d95c184758f19362f23/orjson-3.11.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c8b2769dc31883c44a9cd126560327767f848eb95f99c36c9932f51090bfce9", size = 136703, upload-time = "2025-10-24T15:49:40.795Z" }, - { url = "https://files.pythonhosted.org/packages/aa/fd/d0733fcb9086b8be4ebcfcda2d0312865d17d0d9884378b7cffb29d0763f/orjson-3.11.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1469d254b9884f984026bd9b0fa5bbab477a4bfe558bba6848086f6d43eb5e73", size = 136293, upload-time = "2025-10-24T15:49:42.347Z" }, - { url = "https://files.pythonhosted.org/packages/c2/d7/3c5514e806837c210492d72ae30ccf050ce3f940f45bf085bab272699ef4/orjson-3.11.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:68e44722541983614e37117209a194e8c3ad07838ccb3127d96863c95ec7f1e0", size = 140131, upload-time = "2025-10-24T15:49:43.638Z" }, - { url = "https://files.pythonhosted.org/packages/9c/dd/ba9d32a53207babf65bd510ac4d0faaa818bd0df9a9c6f472fe7c254f2e3/orjson-3.11.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:8e7805fda9672c12be2f22ae124dcd7b03928d6c197544fe12174b86553f3196", size = 406164, upload-time = "2025-10-24T15:49:45.498Z" }, - { url = "https://files.pythonhosted.org/packages/8e/f9/f68ad68f4af7c7bde57cd514eaa2c785e500477a8bc8f834838eb696a685/orjson-3.11.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:04b69c14615fb4434ab867bf6f38b2d649f6f300af30a6705397e895f7aec67a", size = 149859, upload-time = "2025-10-24T15:49:46.981Z" }, - { url = "https://files.pythonhosted.org/packages/b6/d2/7f847761d0c26818395b3d6b21fb6bc2305d94612a35b0a30eae65a22728/orjson-3.11.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:639c3735b8ae7f970066930e58cf0ed39a852d417c24acd4a25fc0b3da3c39a6", size = 139926, upload-time = "2025-10-24T15:49:48.321Z" }, - { url = 
"https://files.pythonhosted.org/packages/9f/37/acd14b12dc62db9a0e1d12386271b8661faae270b22492580d5258808975/orjson-3.11.4-cp313-cp313-win32.whl", hash = "sha256:6c13879c0d2964335491463302a6ca5ad98105fc5db3565499dcb80b1b4bd839", size = 136007, upload-time = "2025-10-24T15:49:49.938Z" }, - { url = "https://files.pythonhosted.org/packages/c0/a9/967be009ddf0a1fffd7a67de9c36656b28c763659ef91352acc02cbe364c/orjson-3.11.4-cp313-cp313-win_amd64.whl", hash = "sha256:09bf242a4af98732db9f9a1ec57ca2604848e16f132e3f72edfd3c5c96de009a", size = 131314, upload-time = "2025-10-24T15:49:51.248Z" }, - { url = "https://files.pythonhosted.org/packages/cb/db/399abd6950fbd94ce125cb8cd1a968def95174792e127b0642781e040ed4/orjson-3.11.4-cp313-cp313-win_arm64.whl", hash = "sha256:a85f0adf63319d6c1ba06fb0dbf997fced64a01179cf17939a6caca662bf92de", size = 126152, upload-time = "2025-10-24T15:49:52.922Z" }, - { url = "https://files.pythonhosted.org/packages/25/e3/54ff63c093cc1697e758e4fceb53164dd2661a7d1bcd522260ba09f54533/orjson-3.11.4-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:42d43a1f552be1a112af0b21c10a5f553983c2a0938d2bbb8ecd8bc9fb572803", size = 243501, upload-time = "2025-10-24T15:49:54.288Z" }, - { url = "https://files.pythonhosted.org/packages/ac/7d/e2d1076ed2e8e0ae9badca65bf7ef22710f93887b29eaa37f09850604e09/orjson-3.11.4-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:26a20f3fbc6c7ff2cb8e89c4c5897762c9d88cf37330c6a117312365d6781d54", size = 128862, upload-time = "2025-10-24T15:49:55.961Z" }, - { url = "https://files.pythonhosted.org/packages/9f/37/ca2eb40b90621faddfa9517dfe96e25f5ae4d8057a7c0cdd613c17e07b2c/orjson-3.11.4-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e3f20be9048941c7ffa8fc523ccbd17f82e24df1549d1d1fe9317712d19938e", size = 130047, upload-time = "2025-10-24T15:49:57.406Z" }, - { url = 
"https://files.pythonhosted.org/packages/c7/62/1021ed35a1f2bad9040f05fa4cc4f9893410df0ba3eaa323ccf899b1c90a/orjson-3.11.4-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aac364c758dc87a52e68e349924d7e4ded348dedff553889e4d9f22f74785316", size = 129073, upload-time = "2025-10-24T15:49:58.782Z" }, - { url = "https://files.pythonhosted.org/packages/e8/3f/f84d966ec2a6fd5f73b1a707e7cd876813422ae4bf9f0145c55c9c6a0f57/orjson-3.11.4-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d5c54a6d76e3d741dcc3f2707f8eeb9ba2a791d3adbf18f900219b62942803b1", size = 136597, upload-time = "2025-10-24T15:50:00.12Z" }, - { url = "https://files.pythonhosted.org/packages/32/78/4fa0aeca65ee82bbabb49e055bd03fa4edea33f7c080c5c7b9601661ef72/orjson-3.11.4-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f28485bdca8617b79d44627f5fb04336897041dfd9fa66d383a49d09d86798bc", size = 137515, upload-time = "2025-10-24T15:50:01.57Z" }, - { url = "https://files.pythonhosted.org/packages/c1/9d/0c102e26e7fde40c4c98470796d050a2ec1953897e2c8ab0cb95b0759fa2/orjson-3.11.4-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bfc2a484cad3585e4ba61985a6062a4c2ed5c7925db6d39f1fa267c9d166487f", size = 136703, upload-time = "2025-10-24T15:50:02.944Z" }, - { url = "https://files.pythonhosted.org/packages/df/ac/2de7188705b4cdfaf0b6c97d2f7849c17d2003232f6e70df98602173f788/orjson-3.11.4-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e34dbd508cb91c54f9c9788923daca129fe5b55c5b4eebe713bf5ed3791280cf", size = 136311, upload-time = "2025-10-24T15:50:04.441Z" }, - { url = "https://files.pythonhosted.org/packages/e0/52/847fcd1a98407154e944feeb12e3b4d487a0e264c40191fb44d1269cbaa1/orjson-3.11.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b13c478fa413d4b4ee606ec8e11c3b2e52683a640b006bb586b3041c2ca5f606", size = 140127, upload-time = "2025-10-24T15:50:07.398Z" }, - { url = 
"https://files.pythonhosted.org/packages/c1/ae/21d208f58bdb847dd4d0d9407e2929862561841baa22bdab7aea10ca088e/orjson-3.11.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:724ca721ecc8a831b319dcd72cfa370cc380db0bf94537f08f7edd0a7d4e1780", size = 406201, upload-time = "2025-10-24T15:50:08.796Z" }, - { url = "https://files.pythonhosted.org/packages/8d/55/0789d6de386c8366059db098a628e2ad8798069e94409b0d8935934cbcb9/orjson-3.11.4-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:977c393f2e44845ce1b540e19a786e9643221b3323dae190668a98672d43fb23", size = 149872, upload-time = "2025-10-24T15:50:10.234Z" }, - { url = "https://files.pythonhosted.org/packages/cc/1d/7ff81ea23310e086c17b41d78a72270d9de04481e6113dbe2ac19118f7fb/orjson-3.11.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:1e539e382cf46edec157ad66b0b0872a90d829a6b71f17cb633d6c160a223155", size = 139931, upload-time = "2025-10-24T15:50:11.623Z" }, - { url = "https://files.pythonhosted.org/packages/77/92/25b886252c50ed64be68c937b562b2f2333b45afe72d53d719e46a565a50/orjson-3.11.4-cp314-cp314-win32.whl", hash = "sha256:d63076d625babab9db5e7836118bdfa086e60f37d8a174194ae720161eb12394", size = 136065, upload-time = "2025-10-24T15:50:13.025Z" }, - { url = "https://files.pythonhosted.org/packages/63/b8/718eecf0bb7e9d64e4956afaafd23db9f04c776d445f59fe94f54bdae8f0/orjson-3.11.4-cp314-cp314-win_amd64.whl", hash = "sha256:0a54d6635fa3aaa438ae32e8570b9f0de36f3f6562c308d2a2a452e8b0592db1", size = 131310, upload-time = "2025-10-24T15:50:14.46Z" }, - { url = "https://files.pythonhosted.org/packages/1a/bf/def5e25d4d8bfce296a9a7c8248109bf58622c21618b590678f945a2c59c/orjson-3.11.4-cp314-cp314-win_arm64.whl", hash = "sha256:78b999999039db3cf58f6d230f524f04f75f129ba3d1ca2ed121f8657e575d3d", size = 126151, upload-time = "2025-10-24T15:50:15.878Z" }, +version = "3.11.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/70/a3/4e09c61a5f0c521cba0bb433639610ae037437669f1a4cbc93799e731d78/orjson-3.11.6.tar.gz", hash = "sha256:0a54c72259f35299fd033042367df781c2f66d10252955ca1efb7db309b954cb", size = 6175856, upload-time = "2026-01-29T15:13:07.942Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/fd/d6b0a36854179b93ed77839f107c4089d91cccc9f9ba1b752b6e3bac5f34/orjson-3.11.6-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e259e85a81d76d9665f03d6129e09e4435531870de5961ddcd0bf6e3a7fde7d7", size = 250029, upload-time = "2026-01-29T15:11:35.942Z" }, + { url = "https://files.pythonhosted.org/packages/a3/bb/22902619826641cf3b627c24aab62e2ad6b571bdd1d34733abb0dd57f67a/orjson-3.11.6-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:52263949f41b4a4822c6b1353bcc5ee2f7109d53a3b493501d3369d6d0e7937a", size = 134518, upload-time = "2026-01-29T15:11:37.347Z" }, + { url = "https://files.pythonhosted.org/packages/72/90/7a818da4bba1de711a9653c420749c0ac95ef8f8651cbc1dca551f462fe0/orjson-3.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6439e742fa7834a24698d358a27346bb203bff356ae0402e7f5df8f749c621a8", size = 137917, upload-time = "2026-01-29T15:11:38.511Z" }, + { url = "https://files.pythonhosted.org/packages/59/0f/02846c1cac8e205cb3822dd8aa8f9114acda216f41fd1999ace6b543418d/orjson-3.11.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b81ffd68f084b4e993e3867acb554a049fa7787cc8710bbcc1e26965580d99be", size = 134923, upload-time = "2026-01-29T15:11:39.711Z" }, + { url = "https://files.pythonhosted.org/packages/94/cf/aeaf683001b474bb3c3c757073a4231dfdfe8467fceaefa5bfd40902c99f/orjson-3.11.6-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5a5468e5e60f7ef6d7f9044b06c8f94a3c56ba528c6e4f7f06ae95164b595ec", size = 140752, upload-time = "2026-01-29T15:11:41.347Z" }, + { url = 
"https://files.pythonhosted.org/packages/fc/fe/dad52d8315a65f084044a0819d74c4c9daf9ebe0681d30f525b0d29a31f0/orjson-3.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72c5005eb45bd2535632d4f3bec7ad392832cfc46b62a3021da3b48a67734b45", size = 144201, upload-time = "2026-01-29T15:11:42.537Z" }, + { url = "https://files.pythonhosted.org/packages/36/bc/ab070dd421565b831801077f1e390c4d4af8bfcecafc110336680a33866b/orjson-3.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0b14dd49f3462b014455a28a4d810d3549bf990567653eb43765cd847df09145", size = 142380, upload-time = "2026-01-29T15:11:44.309Z" }, + { url = "https://files.pythonhosted.org/packages/e6/d8/4b581c725c3a308717f28bf45a9fdac210bca08b67e8430143699413ff06/orjson-3.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bb2c1ea30ef302f0f89f9bf3e7f9ab5e2af29dc9f80eb87aa99788e4e2d65", size = 145582, upload-time = "2026-01-29T15:11:45.506Z" }, + { url = "https://files.pythonhosted.org/packages/5b/a2/09aab99b39f9a7f175ea8fa29adb9933a3d01e7d5d603cdee7f1c40c8da2/orjson-3.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:825e0a85d189533c6bff7e2fc417a28f6fcea53d27125c4551979aecd6c9a197", size = 147270, upload-time = "2026-01-29T15:11:46.782Z" }, + { url = "https://files.pythonhosted.org/packages/b8/2f/5ef8eaf7829dc50da3bf497c7775b21ee88437bc8c41f959aa3504ca6631/orjson-3.11.6-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:b04575417a26530637f6ab4b1f7b4f666eb0433491091da4de38611f97f2fcf3", size = 421222, upload-time = "2026-01-29T15:11:48.106Z" }, + { url = "https://files.pythonhosted.org/packages/3b/b0/dd6b941294c2b5b13da5fdc7e749e58d0c55a5114ab37497155e83050e95/orjson-3.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b83eb2e40e8c4da6d6b340ee6b1d6125f5195eb1b0ebb7eac23c6d9d4f92d224", size = 155562, upload-time = "2026-01-29T15:11:49.408Z" }, + { url = 
"https://files.pythonhosted.org/packages/8e/09/43924331a847476ae2f9a16bd6d3c9dab301265006212ba0d3d7fd58763a/orjson-3.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1f42da604ee65a6b87eef858c913ce3e5777872b19321d11e6fc6d21de89b64f", size = 147432, upload-time = "2026-01-29T15:11:50.635Z" }, + { url = "https://files.pythonhosted.org/packages/5d/e9/d9865961081816909f6b49d880749dbbd88425afd7c5bbce0549e2290d77/orjson-3.11.6-cp311-cp311-win32.whl", hash = "sha256:5ae45df804f2d344cffb36c43fdf03c82fb6cd247f5faa41e21891b40dfbf733", size = 139623, upload-time = "2026-01-29T15:11:51.82Z" }, + { url = "https://files.pythonhosted.org/packages/b4/f9/6836edb92f76eec1082919101eb1145d2f9c33c8f2c5e6fa399b82a2aaa8/orjson-3.11.6-cp311-cp311-win_amd64.whl", hash = "sha256:f4295948d65ace0a2d8f2c4ccc429668b7eb8af547578ec882e16bf79b0050b2", size = 136647, upload-time = "2026-01-29T15:11:53.454Z" }, + { url = "https://files.pythonhosted.org/packages/b3/0c/4954082eea948c9ae52ee0bcbaa2f99da3216a71bcc314ab129bde22e565/orjson-3.11.6-cp311-cp311-win_arm64.whl", hash = "sha256:314e9c45e0b81b547e3a1cfa3df3e07a815821b3dac9fe8cb75014071d0c16a4", size = 135327, upload-time = "2026-01-29T15:11:56.616Z" }, + { url = "https://files.pythonhosted.org/packages/14/ba/759f2879f41910b7e5e0cdbd9cf82a4f017c527fb0e972e9869ca7fe4c8e/orjson-3.11.6-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6f03f30cd8953f75f2a439070c743c7336d10ee940da918d71c6f3556af3ddcf", size = 249988, upload-time = "2026-01-29T15:11:58.294Z" }, + { url = "https://files.pythonhosted.org/packages/f0/70/54cecb929e6c8b10104fcf580b0cc7dc551aa193e83787dd6f3daba28bb5/orjson-3.11.6-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:af44baae65ef386ad971469a8557a0673bb042b0b9fd4397becd9c2dfaa02588", size = 134445, upload-time = "2026-01-29T15:11:59.819Z" }, + { url = 
"https://files.pythonhosted.org/packages/f2/6f/ec0309154457b9ba1ad05f11faa4441f76037152f75e1ac577db3ce7ca96/orjson-3.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c310a48542094e4f7dbb6ac076880994986dda8ca9186a58c3cb70a3514d3231", size = 137708, upload-time = "2026-01-29T15:12:01.488Z" }, + { url = "https://files.pythonhosted.org/packages/20/52/3c71b80840f8bab9cb26417302707b7716b7d25f863f3a541bcfa232fe6e/orjson-3.11.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d8dfa7a5d387f15ecad94cb6b2d2d5f4aeea64efd8d526bfc03c9812d01e1cc0", size = 134798, upload-time = "2026-01-29T15:12:02.705Z" }, + { url = "https://files.pythonhosted.org/packages/30/51/b490a43b22ff736282360bd02e6bded455cf31dfc3224e01cd39f919bbd2/orjson-3.11.6-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba8daee3e999411b50f8b50dbb0a3071dd1845f3f9a1a0a6fa6de86d1689d84d", size = 140839, upload-time = "2026-01-29T15:12:03.956Z" }, + { url = "https://files.pythonhosted.org/packages/95/bc/4bcfe4280c1bc63c5291bb96f98298845b6355da2226d3400e17e7b51e53/orjson-3.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f89d104c974eafd7436d7a5fdbc57f7a1e776789959a2f4f1b2eab5c62a339f4", size = 144080, upload-time = "2026-01-29T15:12:05.151Z" }, + { url = "https://files.pythonhosted.org/packages/01/74/22970f9ead9ab1f1b5f8c227a6c3aa8d71cd2c5acd005868a1d44f2362fa/orjson-3.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2e2e2456788ca5ea75616c40da06fc885a7dc0389780e8a41bf7c5389ba257b", size = 142435, upload-time = "2026-01-29T15:12:06.641Z" }, + { url = "https://files.pythonhosted.org/packages/29/34/d564aff85847ab92c82ee43a7a203683566c2fca0723a5f50aebbe759603/orjson-3.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a42efebc45afabb1448001e90458c4020d5c64fbac8a8dc4045b777db76cb5a", size = 145631, upload-time = "2026-01-29T15:12:08.351Z" }, + { url 
= "https://files.pythonhosted.org/packages/e7/ef/016957a3890752c4aa2368326ea69fa53cdc1fdae0a94a542b6410dbdf52/orjson-3.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:71b7cbef8471324966c3738c90ba38775563ef01b512feb5ad4805682188d1b9", size = 147058, upload-time = "2026-01-29T15:12:10.023Z" }, + { url = "https://files.pythonhosted.org/packages/56/cc/9a899c3972085645b3225569f91a30e221f441e5dc8126e6d060b971c252/orjson-3.11.6-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:f8515e5910f454fe9a8e13c2bb9dc4bae4c1836313e967e72eb8a4ad874f0248", size = 421161, upload-time = "2026-01-29T15:12:11.308Z" }, + { url = "https://files.pythonhosted.org/packages/21/a8/767d3fbd6d9b8fdee76974db40619399355fd49bf91a6dd2c4b6909ccf05/orjson-3.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:300360edf27c8c9bf7047345a94fddf3a8b8922df0ff69d71d854a170cb375cf", size = 155757, upload-time = "2026-01-29T15:12:12.776Z" }, + { url = "https://files.pythonhosted.org/packages/ad/0b/205cd69ac87e2272e13ef3f5f03a3d4657e317e38c1b08aaa2ef97060bbc/orjson-3.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:caaed4dad39e271adfadc106fab634d173b2bb23d9cf7e67bd645f879175ebfc", size = 147446, upload-time = "2026-01-29T15:12:14.166Z" }, + { url = "https://files.pythonhosted.org/packages/de/c5/dd9f22aa9f27c54c7d05cc32f4580c9ac9b6f13811eeb81d6c4c3f50d6b1/orjson-3.11.6-cp312-cp312-win32.whl", hash = "sha256:955368c11808c89793e847830e1b1007503a5923ddadc108547d3b77df761044", size = 139717, upload-time = "2026-01-29T15:12:15.7Z" }, + { url = "https://files.pythonhosted.org/packages/23/a1/e62fc50d904486970315a1654b8cfb5832eb46abb18cd5405118e7e1fc79/orjson-3.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:2c68de30131481150073d90a5d227a4a421982f42c025ecdfb66157f9579e06f", size = 136711, upload-time = "2026-01-29T15:12:17.055Z" }, + { url = "https://files.pythonhosted.org/packages/04/3d/b4fefad8bdf91e0fe212eb04975aeb36ea92997269d68857efcc7eb1dda3/orjson-3.11.6-cp312-cp312-win_arm64.whl", hash 
= "sha256:65dfa096f4e3a5e02834b681f539a87fbe85adc82001383c0db907557f666bfc", size = 135212, upload-time = "2026-01-29T15:12:18.3Z" }, + { url = "https://files.pythonhosted.org/packages/ae/45/d9c71c8c321277bc1ceebf599bc55ba826ae538b7c61f287e9a7e71bd589/orjson-3.11.6-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e4ae1670caabb598a88d385798692ce2a1b2f078971b3329cfb85253c6097f5b", size = 249828, upload-time = "2026-01-29T15:12:20.14Z" }, + { url = "https://files.pythonhosted.org/packages/ac/7e/4afcf4cfa9c2f93846d70eee9c53c3c0123286edcbeb530b7e9bd2aea1b2/orjson-3.11.6-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:2c6b81f47b13dac2caa5d20fbc953c75eb802543abf48403a4703ed3bff225f0", size = 134339, upload-time = "2026-01-29T15:12:22.01Z" }, + { url = "https://files.pythonhosted.org/packages/40/10/6d2b8a064c8d2411d3d0ea6ab43125fae70152aef6bea77bb50fa54d4097/orjson-3.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:647d6d034e463764e86670644bdcaf8e68b076e6e74783383b01085ae9ab334f", size = 137662, upload-time = "2026-01-29T15:12:23.307Z" }, + { url = "https://files.pythonhosted.org/packages/5a/50/5804ea7d586baf83ee88969eefda97a24f9a5bdba0727f73e16305175b26/orjson-3.11.6-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8523b9cc4ef174ae52414f7699e95ee657c16aa18b3c3c285d48d7966cce9081", size = 134626, upload-time = "2026-01-29T15:12:25.099Z" }, + { url = "https://files.pythonhosted.org/packages/9e/2e/f0492ed43e376722bb4afd648e06cc1e627fc7ec8ff55f6ee739277813ea/orjson-3.11.6-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:313dfd7184cde50c733fc0d5c8c0e2f09017b573afd11dc36bd7476b30b4cb17", size = 140873, upload-time = "2026-01-29T15:12:26.369Z" }, + { url = "https://files.pythonhosted.org/packages/10/15/6f874857463421794a303a39ac5494786ad46a4ab46d92bda6705d78c5aa/orjson-3.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:905ee036064ff1e1fd1fb800055ac477cdcb547a78c22c1bc2bbf8d5d1a6fb42", size = 144044, upload-time = "2026-01-29T15:12:28.082Z" }, + { url = "https://files.pythonhosted.org/packages/d2/c7/b7223a3a70f1d0cc2d86953825de45f33877ee1b124a91ca1f79aa6e643f/orjson-3.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ce374cb98411356ba906914441fc993f271a7a666d838d8de0e0900dd4a4bc12", size = 142396, upload-time = "2026-01-29T15:12:30.529Z" }, + { url = "https://files.pythonhosted.org/packages/87/e3/aa1b6d3ad3cd80f10394134f73ae92a1d11fdbe974c34aa199cc18bb5fcf/orjson-3.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cded072b9f65fcfd188aead45efa5bd528ba552add619b3ad2a81f67400ec450", size = 145600, upload-time = "2026-01-29T15:12:31.848Z" }, + { url = "https://files.pythonhosted.org/packages/f6/cf/e4aac5a46cbd39d7e769ef8650efa851dfce22df1ba97ae2b33efe893b12/orjson-3.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7ab85bdbc138e1f73a234db6bb2e4cc1f0fcec8f4bd2bd2430e957a01aadf746", size = 146967, upload-time = "2026-01-29T15:12:33.203Z" }, + { url = "https://files.pythonhosted.org/packages/0b/04/975b86a4bcf6cfeda47aad15956d52fbeda280811206e9967380fa9355c8/orjson-3.11.6-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:351b96b614e3c37a27b8ab048239ebc1e0be76cc17481a430d70a77fb95d3844", size = 421003, upload-time = "2026-01-29T15:12:35.097Z" }, + { url = "https://files.pythonhosted.org/packages/28/d1/0369d0baf40eea5ff2300cebfe209883b2473ab4aa4c4974c8bd5ee42bb2/orjson-3.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f9959c85576beae5cdcaaf39510b15105f1ee8b70d5dacd90152617f57be8c83", size = 155695, upload-time = "2026-01-29T15:12:36.589Z" }, + { url = "https://files.pythonhosted.org/packages/ab/1f/d10c6d6ae26ff1d7c3eea6fd048280ef2e796d4fb260c5424fd021f68ecf/orjson-3.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:75682d62b1b16b61a30716d7a2ec1f4c36195de4a1c61f6665aedd947b93a5d5", size = 
147392, upload-time = "2026-01-29T15:12:37.876Z" }, + { url = "https://files.pythonhosted.org/packages/8d/43/7479921c174441a0aa5277c313732e20713c0969ac303be9f03d88d3db5d/orjson-3.11.6-cp313-cp313-win32.whl", hash = "sha256:40dc277999c2ef227dcc13072be879b4cfd325502daeb5c35ed768f706f2bf30", size = 139718, upload-time = "2026-01-29T15:12:39.274Z" }, + { url = "https://files.pythonhosted.org/packages/88/bc/9ffe7dfbf8454bc4e75bb8bf3a405ed9e0598df1d3535bb4adcd46be07d0/orjson-3.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:f0f6e9f8ff7905660bc3c8a54cd4a675aa98f7f175cf00a59815e2ff42c0d916", size = 136635, upload-time = "2026-01-29T15:12:40.593Z" }, + { url = "https://files.pythonhosted.org/packages/6f/7e/51fa90b451470447ea5023b20d83331ec741ae28d1e6d8ed547c24e7de14/orjson-3.11.6-cp313-cp313-win_arm64.whl", hash = "sha256:1608999478664de848e5900ce41f25c4ecdfc4beacbc632b6fd55e1a586e5d38", size = 135175, upload-time = "2026-01-29T15:12:41.997Z" }, + { url = "https://files.pythonhosted.org/packages/31/9f/46ca908abaeeec7560638ff20276ab327b980d73b3cc2f5b205b4a1c60b3/orjson-3.11.6-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6026db2692041d2a23fe2545606df591687787825ad5821971ef0974f2c47630", size = 249823, upload-time = "2026-01-29T15:12:43.332Z" }, + { url = "https://files.pythonhosted.org/packages/ff/78/ca478089818d18c9cd04f79c43f74ddd031b63c70fa2a946eb5e85414623/orjson-3.11.6-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:132b0ab2e20c73afa85cf142e547511feb3d2f5b7943468984658f3952b467d4", size = 134328, upload-time = "2026-01-29T15:12:45.171Z" }, + { url = "https://files.pythonhosted.org/packages/39/5e/cbb9d830ed4e47f4375ad8eef8e4fff1bf1328437732c3809054fc4e80be/orjson-3.11.6-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b376fb05f20a96ec117d47987dd3b39265c635725bda40661b4c5b73b77b5fde", size = 137651, upload-time = "2026-01-29T15:12:46.602Z" }, + { url = 
"https://files.pythonhosted.org/packages/7c/3a/35df6558c5bc3a65ce0961aefee7f8364e59af78749fc796ea255bfa0cf5/orjson-3.11.6-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:954dae4e080574672a1dfcf2a840eddef0f27bd89b0e94903dd0824e9c1db060", size = 134596, upload-time = "2026-01-29T15:12:47.95Z" }, + { url = "https://files.pythonhosted.org/packages/cd/8e/3d32dd7b7f26a19cc4512d6ed0ae3429567c71feef720fe699ff43c5bc9e/orjson-3.11.6-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe515bb89d59e1e4b48637a964f480b35c0a2676de24e65e55310f6016cca7ce", size = 140923, upload-time = "2026-01-29T15:12:49.333Z" }, + { url = "https://files.pythonhosted.org/packages/6c/9c/1efbf5c99b3304f25d6f0d493a8d1492ee98693637c10ce65d57be839d7b/orjson-3.11.6-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:380f9709c275917af28feb086813923251e11ee10687257cd7f1ea188bcd4485", size = 144068, upload-time = "2026-01-29T15:12:50.927Z" }, + { url = "https://files.pythonhosted.org/packages/82/83/0d19eeb5be797de217303bbb55dde58dba26f996ed905d301d98fd2d4637/orjson-3.11.6-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8173e0d3f6081e7034c51cf984036d02f6bab2a2126de5a759d79f8e5a140e7", size = 142493, upload-time = "2026-01-29T15:12:52.432Z" }, + { url = "https://files.pythonhosted.org/packages/32/a7/573fec3df4dc8fc259b7770dc6c0656f91adce6e19330c78d23f87945d1e/orjson-3.11.6-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dddf9ba706294906c56ef5150a958317b09aa3a8a48df1c52ccf22ec1907eac", size = 145616, upload-time = "2026-01-29T15:12:53.903Z" }, + { url = "https://files.pythonhosted.org/packages/c2/0e/23551b16f21690f7fd5122e3cf40fdca5d77052a434d0071990f97f5fe2f/orjson-3.11.6-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:cbae5c34588dc79938dffb0b6fbe8c531f4dc8a6ad7f39759a9eb5d2da405ef2", size = 146951, upload-time = "2026-01-29T15:12:55.698Z" }, + { url = 
"https://files.pythonhosted.org/packages/b8/63/5e6c8f39805c39123a18e412434ea364349ee0012548d08aa586e2bd6aa9/orjson-3.11.6-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:f75c318640acbddc419733b57f8a07515e587a939d8f54363654041fd1f4e465", size = 421024, upload-time = "2026-01-29T15:12:57.434Z" }, + { url = "https://files.pythonhosted.org/packages/1d/4d/724975cf0087f6550bd01fd62203418afc0ea33fd099aed318c5bcc52df8/orjson-3.11.6-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:e0ab8d13aa2a3e98b4a43487c9205b2c92c38c054b4237777484d503357c8437", size = 155774, upload-time = "2026-01-29T15:12:59.397Z" }, + { url = "https://files.pythonhosted.org/packages/a8/a3/f4c4e3f46b55db29e0a5f20493b924fc791092d9a03ff2068c9fe6c1002f/orjson-3.11.6-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:f884c7fb1020d44612bd7ac0db0babba0e2f78b68d9a650c7959bf99c783773f", size = 147393, upload-time = "2026-01-29T15:13:00.769Z" }, + { url = "https://files.pythonhosted.org/packages/ee/86/6f5529dd27230966171ee126cecb237ed08e9f05f6102bfaf63e5b32277d/orjson-3.11.6-cp314-cp314-win32.whl", hash = "sha256:8d1035d1b25732ec9f971e833a3e299d2b1a330236f75e6fd945ad982c76aaf3", size = 139760, upload-time = "2026-01-29T15:13:02.173Z" }, + { url = "https://files.pythonhosted.org/packages/d3/b5/91ae7037b2894a6b5002fb33f4fbccec98424a928469835c3837fbb22a9b/orjson-3.11.6-cp314-cp314-win_amd64.whl", hash = "sha256:931607a8865d21682bb72de54231655c86df1870502d2962dbfd12c82890d077", size = 136633, upload-time = "2026-01-29T15:13:04.267Z" }, + { url = "https://files.pythonhosted.org/packages/55/74/f473a3ec7a0a7ebc825ca8e3c86763f7d039f379860c81ba12dcdd456547/orjson-3.11.6-cp314-cp314-win_arm64.whl", hash = "sha256:fe71f6b283f4f1832204ab8235ce07adad145052614f77c876fcf0dac97bc06f", size = 135168, upload-time = "2026-01-29T15:13:05.932Z" }, ] [[package]] name = "packaging" -version = "25.0" +version = "26.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +sdist = { url = "https://files.pythonhosted.org/packages/65/ee/299d360cdc32edc7d2cf530f3accf79c4fca01e96ffc950d8a52213bd8e4/packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4", size = 143416, upload-time = "2026-01-21T20:50:39.064Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, + { url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" }, ] [[package]] name = "pandas" -version = "2.2.3" +version = "2.3.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy" }, @@ -1906,44 +1895,48 @@ dependencies = [ { name = "pytz" }, { name = "tzdata" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9c/d6/9f8431bacc2e19dca897724cd097b1bb224a6ad5433784a44b587c7c13af/pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667", size = 4399213, upload-time = "2024-09-20T13:10:04.827Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a8/44/d9502bf0ed197ba9bf1103c9867d5904ddcaf869e52329787fc54ed70cc8/pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039", size = 12602222, upload-time = 
"2024-09-20T13:08:56.254Z" }, - { url = "https://files.pythonhosted.org/packages/52/11/9eac327a38834f162b8250aab32a6781339c69afe7574368fffe46387edf/pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd", size = 11321274, upload-time = "2024-09-20T13:08:58.645Z" }, - { url = "https://files.pythonhosted.org/packages/45/fb/c4beeb084718598ba19aa9f5abbc8aed8b42f90930da861fcb1acdb54c3a/pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698", size = 15579836, upload-time = "2024-09-20T19:01:57.571Z" }, - { url = "https://files.pythonhosted.org/packages/cd/5f/4dba1d39bb9c38d574a9a22548c540177f78ea47b32f99c0ff2ec499fac5/pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc", size = 13058505, upload-time = "2024-09-20T13:09:01.501Z" }, - { url = "https://files.pythonhosted.org/packages/b9/57/708135b90391995361636634df1f1130d03ba456e95bcf576fada459115a/pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3", size = 16744420, upload-time = "2024-09-20T19:02:00.678Z" }, - { url = "https://files.pythonhosted.org/packages/86/4a/03ed6b7ee323cf30404265c284cee9c65c56a212e0a08d9ee06984ba2240/pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32", size = 14440457, upload-time = "2024-09-20T13:09:04.105Z" }, - { url = "https://files.pythonhosted.org/packages/ed/8c/87ddf1fcb55d11f9f847e3c69bb1c6f8e46e2f40ab1a2d2abadb2401b007/pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5", size = 11617166, upload-time = "2024-09-20T13:09:06.917Z" }, - { url = 
"https://files.pythonhosted.org/packages/17/a3/fb2734118db0af37ea7433f57f722c0a56687e14b14690edff0cdb4b7e58/pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9", size = 12529893, upload-time = "2024-09-20T13:09:09.655Z" }, - { url = "https://files.pythonhosted.org/packages/e1/0c/ad295fd74bfac85358fd579e271cded3ac969de81f62dd0142c426b9da91/pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4", size = 11363475, upload-time = "2024-09-20T13:09:14.718Z" }, - { url = "https://files.pythonhosted.org/packages/c6/2a/4bba3f03f7d07207481fed47f5b35f556c7441acddc368ec43d6643c5777/pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3", size = 15188645, upload-time = "2024-09-20T19:02:03.88Z" }, - { url = "https://files.pythonhosted.org/packages/38/f8/d8fddee9ed0d0c0f4a2132c1dfcf0e3e53265055da8df952a53e7eaf178c/pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319", size = 12739445, upload-time = "2024-09-20T13:09:17.621Z" }, - { url = "https://files.pythonhosted.org/packages/20/e8/45a05d9c39d2cea61ab175dbe6a2de1d05b679e8de2011da4ee190d7e748/pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8", size = 16359235, upload-time = "2024-09-20T19:02:07.094Z" }, - { url = "https://files.pythonhosted.org/packages/1d/99/617d07a6a5e429ff90c90da64d428516605a1ec7d7bea494235e1c3882de/pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a", size = 14056756, upload-time = "2024-09-20T13:09:20.474Z" }, - { url = 
"https://files.pythonhosted.org/packages/29/d4/1244ab8edf173a10fd601f7e13b9566c1b525c4f365d6bee918e68381889/pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13", size = 11504248, upload-time = "2024-09-20T13:09:23.137Z" }, - { url = "https://files.pythonhosted.org/packages/64/22/3b8f4e0ed70644e85cfdcd57454686b9057c6c38d2f74fe4b8bc2527214a/pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015", size = 12477643, upload-time = "2024-09-20T13:09:25.522Z" }, - { url = "https://files.pythonhosted.org/packages/e4/93/b3f5d1838500e22c8d793625da672f3eec046b1a99257666c94446969282/pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28", size = 11281573, upload-time = "2024-09-20T13:09:28.012Z" }, - { url = "https://files.pythonhosted.org/packages/f5/94/6c79b07f0e5aab1dcfa35a75f4817f5c4f677931d4234afcd75f0e6a66ca/pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0", size = 15196085, upload-time = "2024-09-20T19:02:10.451Z" }, - { url = "https://files.pythonhosted.org/packages/e8/31/aa8da88ca0eadbabd0a639788a6da13bb2ff6edbbb9f29aa786450a30a91/pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24", size = 12711809, upload-time = "2024-09-20T13:09:30.814Z" }, - { url = "https://files.pythonhosted.org/packages/ee/7c/c6dbdb0cb2a4344cacfb8de1c5808ca885b2e4dcfde8008266608f9372af/pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659", size = 16356316, upload-time = "2024-09-20T19:02:13.825Z" }, - { url = 
"https://files.pythonhosted.org/packages/57/b7/8b757e7d92023b832869fa8881a992696a0bfe2e26f72c9ae9f255988d42/pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb", size = 14022055, upload-time = "2024-09-20T13:09:33.462Z" }, - { url = "https://files.pythonhosted.org/packages/3b/bc/4b18e2b8c002572c5a441a64826252ce5da2aa738855747247a971988043/pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d", size = 11481175, upload-time = "2024-09-20T13:09:35.871Z" }, - { url = "https://files.pythonhosted.org/packages/76/a3/a5d88146815e972d40d19247b2c162e88213ef51c7c25993942c39dbf41d/pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468", size = 12615650, upload-time = "2024-09-20T13:09:38.685Z" }, - { url = "https://files.pythonhosted.org/packages/9c/8c/f0fd18f6140ddafc0c24122c8a964e48294acc579d47def376fef12bcb4a/pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18", size = 11290177, upload-time = "2024-09-20T13:09:41.141Z" }, - { url = "https://files.pythonhosted.org/packages/ed/f9/e995754eab9c0f14c6777401f7eece0943840b7a9fc932221c19d1abee9f/pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2", size = 14651526, upload-time = "2024-09-20T19:02:16.905Z" }, - { url = "https://files.pythonhosted.org/packages/25/b0/98d6ae2e1abac4f35230aa756005e8654649d305df9a28b16b9ae4353bff/pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4", size = 11871013, upload-time = "2024-09-20T13:09:44.39Z" }, - { url = 
"https://files.pythonhosted.org/packages/cc/57/0f72a10f9db6a4628744c8e8f0df4e6e21de01212c7c981d31e50ffc8328/pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d", size = 15711620, upload-time = "2024-09-20T19:02:20.639Z" }, - { url = "https://files.pythonhosted.org/packages/ab/5f/b38085618b950b79d2d9164a711c52b10aefc0ae6833b96f626b7021b2ed/pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a", size = 13098436, upload-time = "2024-09-20T13:09:48.112Z" }, -] - -[[package]] -name = "pathspec" -version = "0.12.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/33/01/d40b85317f86cf08d853a4f495195c73815fdf205eef3993821720274518/pandas-2.3.3.tar.gz", hash = "sha256:e05e1af93b977f7eafa636d043f9f94c7ee3ac81af99c13508215942e64c993b", size = 4495223, upload-time = "2025-09-29T23:34:51.853Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/fa/7ac648108144a095b4fb6aa3de1954689f7af60a14cf25583f4960ecb878/pandas-2.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:602b8615ebcc4a0c1751e71840428ddebeb142ec02c786e8ad6b1ce3c8dec523", size = 11578790, upload-time = "2025-09-29T23:18:30.065Z" }, + { url = 
"https://files.pythonhosted.org/packages/9b/35/74442388c6cf008882d4d4bdfc4109be87e9b8b7ccd097ad1e7f006e2e95/pandas-2.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8fe25fc7b623b0ef6b5009149627e34d2a4657e880948ec3c840e9402e5c1b45", size = 10833831, upload-time = "2025-09-29T23:38:56.071Z" }, + { url = "https://files.pythonhosted.org/packages/fe/e4/de154cbfeee13383ad58d23017da99390b91d73f8c11856f2095e813201b/pandas-2.3.3-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b468d3dad6ff947df92dcb32ede5b7bd41a9b3cceef0a30ed925f6d01fb8fa66", size = 12199267, upload-time = "2025-09-29T23:18:41.627Z" }, + { url = "https://files.pythonhosted.org/packages/bf/c9/63f8d545568d9ab91476b1818b4741f521646cbdd151c6efebf40d6de6f7/pandas-2.3.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b98560e98cb334799c0b07ca7967ac361a47326e9b4e5a7dfb5ab2b1c9d35a1b", size = 12789281, upload-time = "2025-09-29T23:18:56.834Z" }, + { url = "https://files.pythonhosted.org/packages/f2/00/a5ac8c7a0e67fd1a6059e40aa08fa1c52cc00709077d2300e210c3ce0322/pandas-2.3.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37b5848ba49824e5c30bedb9c830ab9b7751fd049bc7914533e01c65f79791", size = 13240453, upload-time = "2025-09-29T23:19:09.247Z" }, + { url = "https://files.pythonhosted.org/packages/27/4d/5c23a5bc7bd209231618dd9e606ce076272c9bc4f12023a70e03a86b4067/pandas-2.3.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:db4301b2d1f926ae677a751eb2bd0e8c5f5319c9cb3f88b0becbbb0b07b34151", size = 13890361, upload-time = "2025-09-29T23:19:25.342Z" }, + { url = "https://files.pythonhosted.org/packages/8e/59/712db1d7040520de7a4965df15b774348980e6df45c129b8c64d0dbe74ef/pandas-2.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:f086f6fe114e19d92014a1966f43a3e62285109afe874f067f5abbdcbb10e59c", size = 11348702, upload-time = "2025-09-29T23:19:38.296Z" }, + { url = 
"https://files.pythonhosted.org/packages/9c/fb/231d89e8637c808b997d172b18e9d4a4bc7bf31296196c260526055d1ea0/pandas-2.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d21f6d74eb1725c2efaa71a2bfc661a0689579b58e9c0ca58a739ff0b002b53", size = 11597846, upload-time = "2025-09-29T23:19:48.856Z" }, + { url = "https://files.pythonhosted.org/packages/5c/bd/bf8064d9cfa214294356c2d6702b716d3cf3bb24be59287a6a21e24cae6b/pandas-2.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3fd2f887589c7aa868e02632612ba39acb0b8948faf5cc58f0850e165bd46f35", size = 10729618, upload-time = "2025-09-29T23:39:08.659Z" }, + { url = "https://files.pythonhosted.org/packages/57/56/cf2dbe1a3f5271370669475ead12ce77c61726ffd19a35546e31aa8edf4e/pandas-2.3.3-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ecaf1e12bdc03c86ad4a7ea848d66c685cb6851d807a26aa245ca3d2017a1908", size = 11737212, upload-time = "2025-09-29T23:19:59.765Z" }, + { url = "https://files.pythonhosted.org/packages/e5/63/cd7d615331b328e287d8233ba9fdf191a9c2d11b6af0c7a59cfcec23de68/pandas-2.3.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b3d11d2fda7eb164ef27ffc14b4fcab16a80e1ce67e9f57e19ec0afaf715ba89", size = 12362693, upload-time = "2025-09-29T23:20:14.098Z" }, + { url = "https://files.pythonhosted.org/packages/a6/de/8b1895b107277d52f2b42d3a6806e69cfef0d5cf1d0ba343470b9d8e0a04/pandas-2.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a68e15f780eddf2b07d242e17a04aa187a7ee12b40b930bfdd78070556550e98", size = 12771002, upload-time = "2025-09-29T23:20:26.76Z" }, + { url = "https://files.pythonhosted.org/packages/87/21/84072af3187a677c5893b170ba2c8fbe450a6ff911234916da889b698220/pandas-2.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:371a4ab48e950033bcf52b6527eccb564f52dc826c02afd9a1bc0ab731bba084", size = 13450971, upload-time = "2025-09-29T23:20:41.344Z" }, + { url = 
"https://files.pythonhosted.org/packages/86/41/585a168330ff063014880a80d744219dbf1dd7a1c706e75ab3425a987384/pandas-2.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:a16dcec078a01eeef8ee61bf64074b4e524a2a3f4b3be9326420cabe59c4778b", size = 10992722, upload-time = "2025-09-29T23:20:54.139Z" }, + { url = "https://files.pythonhosted.org/packages/cd/4b/18b035ee18f97c1040d94debd8f2e737000ad70ccc8f5513f4eefad75f4b/pandas-2.3.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:56851a737e3470de7fa88e6131f41281ed440d29a9268dcbf0002da5ac366713", size = 11544671, upload-time = "2025-09-29T23:21:05.024Z" }, + { url = "https://files.pythonhosted.org/packages/31/94/72fac03573102779920099bcac1c3b05975c2cb5f01eac609faf34bed1ca/pandas-2.3.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bdcd9d1167f4885211e401b3036c0c8d9e274eee67ea8d0758a256d60704cfe8", size = 10680807, upload-time = "2025-09-29T23:21:15.979Z" }, + { url = "https://files.pythonhosted.org/packages/16/87/9472cf4a487d848476865321de18cc8c920b8cab98453ab79dbbc98db63a/pandas-2.3.3-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e32e7cc9af0f1cc15548288a51a3b681cc2a219faa838e995f7dc53dbab1062d", size = 11709872, upload-time = "2025-09-29T23:21:27.165Z" }, + { url = "https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:318d77e0e42a628c04dc56bcef4b40de67918f7041c2b061af1da41dcff670ac", size = 12306371, upload-time = "2025-09-29T23:21:40.532Z" }, + { url = "https://files.pythonhosted.org/packages/33/81/a3afc88fca4aa925804a27d2676d22dcd2031c2ebe08aabd0ae55b9ff282/pandas-2.3.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4e0a175408804d566144e170d0476b15d78458795bb18f1304fb94160cabf40c", size = 12765333, upload-time = "2025-09-29T23:21:55.77Z" }, + { url = 
"https://files.pythonhosted.org/packages/8d/0f/b4d4ae743a83742f1153464cf1a8ecfafc3ac59722a0b5c8602310cb7158/pandas-2.3.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:93c2d9ab0fc11822b5eece72ec9587e172f63cff87c00b062f6e37448ced4493", size = 13418120, upload-time = "2025-09-29T23:22:10.109Z" }, + { url = "https://files.pythonhosted.org/packages/4f/c7/e54682c96a895d0c808453269e0b5928a07a127a15704fedb643e9b0a4c8/pandas-2.3.3-cp313-cp313-win_amd64.whl", hash = "sha256:f8bfc0e12dc78f777f323f55c58649591b2cd0c43534e8355c51d3fede5f4dee", size = 10993991, upload-time = "2025-09-29T23:25:04.889Z" }, + { url = "https://files.pythonhosted.org/packages/f9/ca/3f8d4f49740799189e1395812f3bf23b5e8fc7c190827d55a610da72ce55/pandas-2.3.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:75ea25f9529fdec2d2e93a42c523962261e567d250b0013b16210e1d40d7c2e5", size = 12048227, upload-time = "2025-09-29T23:22:24.343Z" }, + { url = "https://files.pythonhosted.org/packages/0e/5a/f43efec3e8c0cc92c4663ccad372dbdff72b60bdb56b2749f04aa1d07d7e/pandas-2.3.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74ecdf1d301e812db96a465a525952f4dde225fdb6d8e5a521d47e1f42041e21", size = 11411056, upload-time = "2025-09-29T23:22:37.762Z" }, + { url = "https://files.pythonhosted.org/packages/46/b1/85331edfc591208c9d1a63a06baa67b21d332e63b7a591a5ba42a10bb507/pandas-2.3.3-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6435cb949cb34ec11cc9860246ccb2fdc9ecd742c12d3304989017d53f039a78", size = 11645189, upload-time = "2025-09-29T23:22:51.688Z" }, + { url = "https://files.pythonhosted.org/packages/44/23/78d645adc35d94d1ac4f2a3c4112ab6f5b8999f4898b8cdf01252f8df4a9/pandas-2.3.3-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:900f47d8f20860de523a1ac881c4c36d65efcb2eb850e6948140fa781736e110", size = 12121912, upload-time = "2025-09-29T23:23:05.042Z" }, + { url = 
"https://files.pythonhosted.org/packages/53/da/d10013df5e6aaef6b425aa0c32e1fc1f3e431e4bcabd420517dceadce354/pandas-2.3.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a45c765238e2ed7d7c608fc5bc4a6f88b642f2f01e70c0c23d2224dd21829d86", size = 12712160, upload-time = "2025-09-29T23:23:28.57Z" }, + { url = "https://files.pythonhosted.org/packages/bd/17/e756653095a083d8a37cbd816cb87148debcfcd920129b25f99dd8d04271/pandas-2.3.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c4fc4c21971a1a9f4bdb4c73978c7f7256caa3e62b323f70d6cb80db583350bc", size = 13199233, upload-time = "2025-09-29T23:24:24.876Z" }, + { url = "https://files.pythonhosted.org/packages/04/fd/74903979833db8390b73b3a8a7d30d146d710bd32703724dd9083950386f/pandas-2.3.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:ee15f284898e7b246df8087fc82b87b01686f98ee67d85a17b7ab44143a3a9a0", size = 11540635, upload-time = "2025-09-29T23:25:52.486Z" }, + { url = "https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1611aedd912e1ff81ff41c745822980c49ce4a7907537be8692c8dbc31924593", size = 10759079, upload-time = "2025-09-29T23:26:33.204Z" }, + { url = "https://files.pythonhosted.org/packages/ca/05/d01ef80a7a3a12b2f8bbf16daba1e17c98a2f039cbc8e2f77a2c5a63d382/pandas-2.3.3-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6d2cefc361461662ac48810cb14365a365ce864afe85ef1f447ff5a1e99ea81c", size = 11814049, upload-time = "2025-09-29T23:27:15.384Z" }, + { url = "https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ee67acbbf05014ea6c763beb097e03cd629961c8a632075eeb34247120abcb4b", size = 12332638, upload-time = "2025-09-29T23:27:51.625Z" }, + { url = 
"https://files.pythonhosted.org/packages/c5/33/dd70400631b62b9b29c3c93d2feee1d0964dc2bae2e5ad7a6c73a7f25325/pandas-2.3.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c46467899aaa4da076d5abc11084634e2d197e9460643dd455ac3db5856b24d6", size = 12886834, upload-time = "2025-09-29T23:28:21.289Z" }, + { url = "https://files.pythonhosted.org/packages/d3/18/b5d48f55821228d0d2692b34fd5034bb185e854bdb592e9c640f6290e012/pandas-2.3.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:6253c72c6a1d990a410bc7de641d34053364ef8bcd3126f7e7450125887dffe3", size = 13409925, upload-time = "2025-09-29T23:28:58.261Z" }, + { url = "https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl", hash = "sha256:1b07204a219b3b7350abaae088f451860223a52cfb8a6c53358e7948735158e5", size = 11109071, upload-time = "2025-09-29T23:32:27.484Z" }, + { url = "https://files.pythonhosted.org/packages/89/9c/0e21c895c38a157e0faa1fb64587a9226d6dd46452cac4532d80c3c4a244/pandas-2.3.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2462b1a365b6109d275250baaae7b760fd25c726aaca0054649286bcfbb3e8ec", size = 12048504, upload-time = "2025-09-29T23:29:31.47Z" }, + { url = "https://files.pythonhosted.org/packages/d7/82/b69a1c95df796858777b68fbe6a81d37443a33319761d7c652ce77797475/pandas-2.3.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0242fe9a49aa8b4d78a4fa03acb397a58833ef6199e9aa40a95f027bb3a1b6e7", size = 11410702, upload-time = "2025-09-29T23:29:54.591Z" }, + { url = "https://files.pythonhosted.org/packages/f9/88/702bde3ba0a94b8c73a0181e05144b10f13f29ebfc2150c3a79062a8195d/pandas-2.3.3-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a21d830e78df0a515db2b3d2f5570610f5e6bd2e27749770e8bb7b524b89b450", size = 11634535, upload-time = "2025-09-29T23:30:21.003Z" }, + { url = 
"https://files.pythonhosted.org/packages/a4/1e/1bac1a839d12e6a82ec6cb40cda2edde64a2013a66963293696bbf31fbbb/pandas-2.3.3-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2e3ebdb170b5ef78f19bfb71b0dc5dc58775032361fa188e814959b74d726dd5", size = 12121582, upload-time = "2025-09-29T23:30:43.391Z" }, + { url = "https://files.pythonhosted.org/packages/44/91/483de934193e12a3b1d6ae7c8645d083ff88dec75f46e827562f1e4b4da6/pandas-2.3.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d051c0e065b94b7a3cea50eb1ec32e912cd96dba41647eb24104b6c6c14c5788", size = 12699963, upload-time = "2025-09-29T23:31:10.009Z" }, + { url = "https://files.pythonhosted.org/packages/70/44/5191d2e4026f86a2a109053e194d3ba7a31a2d10a9c2348368c63ed4e85a/pandas-2.3.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3869faf4bd07b3b66a9f462417d0ca3a9df29a9f6abd5d0d0dbab15dac7abe87", size = 13202175, upload-time = "2025-09-29T23:31:59.173Z" }, ] [[package]] @@ -2066,39 +2059,56 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5b/5a/bc7b4a4ef808fa59a816c17b20c4bef6884daebbdf627ff2a161da67da19/propcache-0.4.1-py3-none-any.whl", hash = "sha256:af2a6052aeb6cf17d3e46ee169099044fd8224cbaf75c76a2ef596e8163e2237", size = 13305, upload-time = "2025-10-08T19:49:00.792Z" }, ] +[[package]] +name = "protobuf" +version = "6.33.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/25/7c72c307aafc96fa87062aa6291d9f7c94836e43214d43722e86037aac02/protobuf-6.33.5.tar.gz", hash = "sha256:6ddcac2a081f8b7b9642c09406bc6a4290128fce5f471cddd165960bb9119e5c", size = 444465, upload-time = "2026-01-29T21:51:33.494Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/79/af92d0a8369732b027e6d6084251dd8e782c685c72da161bd4a2e00fbabb/protobuf-6.33.5-cp310-abi3-win32.whl", hash = "sha256:d71b040839446bac0f4d162e758bea99c8251161dae9d0983a3b88dee345153b", size = 425769, upload-time = "2026-01-29T21:51:21.751Z" 
}, + { url = "https://files.pythonhosted.org/packages/55/75/bb9bc917d10e9ee13dee8607eb9ab963b7cf8be607c46e7862c748aa2af7/protobuf-6.33.5-cp310-abi3-win_amd64.whl", hash = "sha256:3093804752167bcab3998bec9f1048baae6e29505adaf1afd14a37bddede533c", size = 437118, upload-time = "2026-01-29T21:51:24.022Z" }, + { url = "https://files.pythonhosted.org/packages/a2/6b/e48dfc1191bc5b52950246275bf4089773e91cb5ba3592621723cdddca62/protobuf-6.33.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:a5cb85982d95d906df1e2210e58f8e4f1e3cdc088e52c921a041f9c9a0386de5", size = 427766, upload-time = "2026-01-29T21:51:25.413Z" }, + { url = "https://files.pythonhosted.org/packages/4e/b1/c79468184310de09d75095ed1314b839eb2f72df71097db9d1404a1b2717/protobuf-6.33.5-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:9b71e0281f36f179d00cbcb119cb19dec4d14a81393e5ea220f64b286173e190", size = 324638, upload-time = "2026-01-29T21:51:26.423Z" }, + { url = "https://files.pythonhosted.org/packages/c5/f5/65d838092fd01c44d16037953fd4c2cc851e783de9b8f02b27ec4ffd906f/protobuf-6.33.5-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:8afa18e1d6d20af15b417e728e9f60f3aa108ee76f23c3b2c07a2c3b546d3afd", size = 339411, upload-time = "2026-01-29T21:51:27.446Z" }, + { url = "https://files.pythonhosted.org/packages/9b/53/a9443aa3ca9ba8724fdfa02dd1887c1bcd8e89556b715cfbacca6b63dbec/protobuf-6.33.5-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:cbf16ba3350fb7b889fca858fb215967792dc125b35c7976ca4818bee3521cf0", size = 323465, upload-time = "2026-01-29T21:51:28.925Z" }, + { url = "https://files.pythonhosted.org/packages/57/bf/2086963c69bdac3d7cff1cc7ff79b8ce5ea0bec6797a017e1be338a46248/protobuf-6.33.5-py3-none-any.whl", hash = "sha256:69915a973dd0f60f31a08b8318b73eab2bd6a392c79184b3612226b0a3f8ec02", size = 170687, upload-time = "2026-01-29T21:51:32.557Z" }, +] + [[package]] name = "psutil" -version = "7.1.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/e1/88/bdd0a41e5857d5d703287598cbf08dad90aed56774ea52ae071bae9071b6/psutil-7.1.3.tar.gz", hash = "sha256:6c86281738d77335af7aec228328e944b30930899ea760ecf33a4dba66be5e74", size = 489059, upload-time = "2025-11-02T12:25:54.619Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/bd/93/0c49e776b8734fef56ec9c5c57f923922f2cf0497d62e0f419465f28f3d0/psutil-7.1.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0005da714eee687b4b8decd3d6cc7c6db36215c9e74e5ad2264b90c3df7d92dc", size = 239751, upload-time = "2025-11-02T12:25:58.161Z" }, - { url = "https://files.pythonhosted.org/packages/6f/8d/b31e39c769e70780f007969815195a55c81a63efebdd4dbe9e7a113adb2f/psutil-7.1.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:19644c85dcb987e35eeeaefdc3915d059dac7bd1167cdcdbf27e0ce2df0c08c0", size = 240368, upload-time = "2025-11-02T12:26:00.491Z" }, - { url = "https://files.pythonhosted.org/packages/62/61/23fd4acc3c9eebbf6b6c78bcd89e5d020cfde4acf0a9233e9d4e3fa698b4/psutil-7.1.3-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95ef04cf2e5ba0ab9eaafc4a11eaae91b44f4ef5541acd2ee91d9108d00d59a7", size = 287134, upload-time = "2025-11-02T12:26:02.613Z" }, - { url = "https://files.pythonhosted.org/packages/30/1c/f921a009ea9ceb51aa355cb0cc118f68d354db36eae18174bab63affb3e6/psutil-7.1.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1068c303be3a72f8e18e412c5b2a8f6d31750fb152f9cb106b54090296c9d251", size = 289904, upload-time = "2025-11-02T12:26:05.207Z" }, - { url = "https://files.pythonhosted.org/packages/a6/82/62d68066e13e46a5116df187d319d1724b3f437ddd0f958756fc052677f4/psutil-7.1.3-cp313-cp313t-win_amd64.whl", hash = "sha256:18349c5c24b06ac5612c0428ec2a0331c26443d259e2a0144a9b24b4395b58fa", size = 249642, upload-time = "2025-11-02T12:26:07.447Z" }, - { url = 
"https://files.pythonhosted.org/packages/df/ad/c1cd5fe965c14a0392112f68362cfceb5230819dbb5b1888950d18a11d9f/psutil-7.1.3-cp313-cp313t-win_arm64.whl", hash = "sha256:c525ffa774fe4496282fb0b1187725793de3e7c6b29e41562733cae9ada151ee", size = 245518, upload-time = "2025-11-02T12:26:09.719Z" }, - { url = "https://files.pythonhosted.org/packages/2e/bb/6670bded3e3236eb4287c7bcdc167e9fae6e1e9286e437f7111caed2f909/psutil-7.1.3-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b403da1df4d6d43973dc004d19cee3b848e998ae3154cc8097d139b77156c353", size = 239843, upload-time = "2025-11-02T12:26:11.968Z" }, - { url = "https://files.pythonhosted.org/packages/b8/66/853d50e75a38c9a7370ddbeefabdd3d3116b9c31ef94dc92c6729bc36bec/psutil-7.1.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ad81425efc5e75da3f39b3e636293360ad8d0b49bed7df824c79764fb4ba9b8b", size = 240369, upload-time = "2025-11-02T12:26:14.358Z" }, - { url = "https://files.pythonhosted.org/packages/41/bd/313aba97cb5bfb26916dc29cf0646cbe4dd6a89ca69e8c6edce654876d39/psutil-7.1.3-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f33a3702e167783a9213db10ad29650ebf383946e91bc77f28a5eb083496bc9", size = 288210, upload-time = "2025-11-02T12:26:16.699Z" }, - { url = "https://files.pythonhosted.org/packages/c2/fa/76e3c06e760927a0cfb5705eb38164254de34e9bd86db656d4dbaa228b04/psutil-7.1.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fac9cd332c67f4422504297889da5ab7e05fd11e3c4392140f7370f4208ded1f", size = 291182, upload-time = "2025-11-02T12:26:18.848Z" }, - { url = "https://files.pythonhosted.org/packages/0f/1d/5774a91607035ee5078b8fd747686ebec28a962f178712de100d00b78a32/psutil-7.1.3-cp314-cp314t-win_amd64.whl", hash = "sha256:3792983e23b69843aea49c8f5b8f115572c5ab64c153bada5270086a2123c7e7", size = 250466, upload-time = "2025-11-02T12:26:21.183Z" }, - { url = 
"https://files.pythonhosted.org/packages/00/ca/e426584bacb43a5cb1ac91fae1937f478cd8fbe5e4ff96574e698a2c77cd/psutil-7.1.3-cp314-cp314t-win_arm64.whl", hash = "sha256:31d77fcedb7529f27bb3a0472bea9334349f9a04160e8e6e5020f22c59893264", size = 245756, upload-time = "2025-11-02T12:26:23.148Z" }, - { url = "https://files.pythonhosted.org/packages/ef/94/46b9154a800253e7ecff5aaacdf8ebf43db99de4a2dfa18575b02548654e/psutil-7.1.3-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2bdbcd0e58ca14996a42adf3621a6244f1bb2e2e528886959c72cf1e326677ab", size = 238359, upload-time = "2025-11-02T12:26:25.284Z" }, - { url = "https://files.pythonhosted.org/packages/68/3a/9f93cff5c025029a36d9a92fef47220ab4692ee7f2be0fba9f92813d0cb8/psutil-7.1.3-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:bc31fa00f1fbc3c3802141eede66f3a2d51d89716a194bf2cd6fc68310a19880", size = 239171, upload-time = "2025-11-02T12:26:27.23Z" }, - { url = "https://files.pythonhosted.org/packages/ce/b1/5f49af514f76431ba4eea935b8ad3725cdeb397e9245ab919dbc1d1dc20f/psutil-7.1.3-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3bb428f9f05c1225a558f53e30ccbad9930b11c3fc206836242de1091d3e7dd3", size = 263261, upload-time = "2025-11-02T12:26:29.48Z" }, - { url = "https://files.pythonhosted.org/packages/e0/95/992c8816a74016eb095e73585d747e0a8ea21a061ed3689474fabb29a395/psutil-7.1.3-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56d974e02ca2c8eb4812c3f76c30e28836fffc311d55d979f1465c1feeb2b68b", size = 264635, upload-time = "2025-11-02T12:26:31.74Z" }, - { url = "https://files.pythonhosted.org/packages/55/4c/c3ed1a622b6ae2fd3c945a366e64eb35247a31e4db16cf5095e269e8eb3c/psutil-7.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:f39c2c19fe824b47484b96f9692932248a54c43799a84282cfe58d05a6449efd", size = 247633, upload-time = "2025-11-02T12:26:33.887Z" }, - { url = 
"https://files.pythonhosted.org/packages/c9/ad/33b2ccec09bf96c2b2ef3f9a6f66baac8253d7565d8839e024a6b905d45d/psutil-7.1.3-cp37-abi3-win_arm64.whl", hash = "sha256:bd0d69cee829226a761e92f28140bec9a5ee9d5b4fb4b0cc589068dbfff559b1", size = 244608, upload-time = "2025-11-02T12:26:36.136Z" }, +version = "7.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/aa/c6/d1ddf4abb55e93cebc4f2ed8b5d6dbad109ecb8d63748dd2b20ab5e57ebe/psutil-7.2.2.tar.gz", hash = "sha256:0746f5f8d406af344fd547f1c8daa5f5c33dbc293bb8d6a16d80b4bb88f59372", size = 493740, upload-time = "2026-01-28T18:14:54.428Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/08/510cbdb69c25a96f4ae523f733cdc963ae654904e8db864c07585ef99875/psutil-7.2.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2edccc433cbfa046b980b0df0171cd25bcaeb3a68fe9022db0979e7aa74a826b", size = 130595, upload-time = "2026-01-28T18:14:57.293Z" }, + { url = "https://files.pythonhosted.org/packages/d6/f5/97baea3fe7a5a9af7436301f85490905379b1c6f2dd51fe3ecf24b4c5fbf/psutil-7.2.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e78c8603dcd9a04c7364f1a3e670cea95d51ee865e4efb3556a3a63adef958ea", size = 131082, upload-time = "2026-01-28T18:14:59.732Z" }, + { url = "https://files.pythonhosted.org/packages/37/d6/246513fbf9fa174af531f28412297dd05241d97a75911ac8febefa1a53c6/psutil-7.2.2-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1a571f2330c966c62aeda00dd24620425d4b0cc86881c89861fbc04549e5dc63", size = 181476, upload-time = "2026-01-28T18:15:01.884Z" }, + { url = "https://files.pythonhosted.org/packages/b8/b5/9182c9af3836cca61696dabe4fd1304e17bc56cb62f17439e1154f225dd3/psutil-7.2.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:917e891983ca3c1887b4ef36447b1e0873e70c933afc831c6b6da078ba474312", size = 184062, upload-time = "2026-01-28T18:15:04.436Z" }, + { url 
= "https://files.pythonhosted.org/packages/16/ba/0756dca669f5a9300d0cbcbfae9a4c30e446dfc7440ffe43ded5724bfd93/psutil-7.2.2-cp313-cp313t-win_amd64.whl", hash = "sha256:ab486563df44c17f5173621c7b198955bd6b613fb87c71c161f827d3fb149a9b", size = 139893, upload-time = "2026-01-28T18:15:06.378Z" }, + { url = "https://files.pythonhosted.org/packages/1c/61/8fa0e26f33623b49949346de05ec1ddaad02ed8ba64af45f40a147dbfa97/psutil-7.2.2-cp313-cp313t-win_arm64.whl", hash = "sha256:ae0aefdd8796a7737eccea863f80f81e468a1e4cf14d926bd9b6f5f2d5f90ca9", size = 135589, upload-time = "2026-01-28T18:15:08.03Z" }, + { url = "https://files.pythonhosted.org/packages/81/69/ef179ab5ca24f32acc1dac0c247fd6a13b501fd5534dbae0e05a1c48b66d/psutil-7.2.2-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:eed63d3b4d62449571547b60578c5b2c4bcccc5387148db46e0c2313dad0ee00", size = 130664, upload-time = "2026-01-28T18:15:09.469Z" }, + { url = "https://files.pythonhosted.org/packages/7b/64/665248b557a236d3fa9efc378d60d95ef56dd0a490c2cd37dafc7660d4a9/psutil-7.2.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7b6d09433a10592ce39b13d7be5a54fbac1d1228ed29abc880fb23df7cb694c9", size = 131087, upload-time = "2026-01-28T18:15:11.724Z" }, + { url = "https://files.pythonhosted.org/packages/d5/2e/e6782744700d6759ebce3043dcfa661fb61e2fb752b91cdeae9af12c2178/psutil-7.2.2-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fa4ecf83bcdf6e6c8f4449aff98eefb5d0604bf88cb883d7da3d8d2d909546a", size = 182383, upload-time = "2026-01-28T18:15:13.445Z" }, + { url = "https://files.pythonhosted.org/packages/57/49/0a41cefd10cb7505cdc04dab3eacf24c0c2cb158a998b8c7b1d27ee2c1f5/psutil-7.2.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e452c464a02e7dc7822a05d25db4cde564444a67e58539a00f929c51eddda0cf", size = 185210, upload-time = "2026-01-28T18:15:16.002Z" }, + { url = 
"https://files.pythonhosted.org/packages/dd/2c/ff9bfb544f283ba5f83ba725a3c5fec6d6b10b8f27ac1dc641c473dc390d/psutil-7.2.2-cp314-cp314t-win_amd64.whl", hash = "sha256:c7663d4e37f13e884d13994247449e9f8f574bc4655d509c3b95e9ec9e2b9dc1", size = 141228, upload-time = "2026-01-28T18:15:18.385Z" }, + { url = "https://files.pythonhosted.org/packages/f2/fc/f8d9c31db14fcec13748d373e668bc3bed94d9077dbc17fb0eebc073233c/psutil-7.2.2-cp314-cp314t-win_arm64.whl", hash = "sha256:11fe5a4f613759764e79c65cf11ebdf26e33d6dd34336f8a337aa2996d71c841", size = 136284, upload-time = "2026-01-28T18:15:19.912Z" }, + { url = "https://files.pythonhosted.org/packages/e7/36/5ee6e05c9bd427237b11b3937ad82bb8ad2752d72c6969314590dd0c2f6e/psutil-7.2.2-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ed0cace939114f62738d808fdcecd4c869222507e266e574799e9c0faa17d486", size = 129090, upload-time = "2026-01-28T18:15:22.168Z" }, + { url = "https://files.pythonhosted.org/packages/80/c4/f5af4c1ca8c1eeb2e92ccca14ce8effdeec651d5ab6053c589b074eda6e1/psutil-7.2.2-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:1a7b04c10f32cc88ab39cbf606e117fd74721c831c98a27dc04578deb0c16979", size = 129859, upload-time = "2026-01-28T18:15:23.795Z" }, + { url = "https://files.pythonhosted.org/packages/b5/70/5d8df3b09e25bce090399cf48e452d25c935ab72dad19406c77f4e828045/psutil-7.2.2-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:076a2d2f923fd4821644f5ba89f059523da90dc9014e85f8e45a5774ca5bc6f9", size = 155560, upload-time = "2026-01-28T18:15:25.976Z" }, + { url = "https://files.pythonhosted.org/packages/63/65/37648c0c158dc222aba51c089eb3bdfa238e621674dc42d48706e639204f/psutil-7.2.2-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b0726cecd84f9474419d67252add4ac0cd9811b04d61123054b9fb6f57df6e9e", size = 156997, upload-time = "2026-01-28T18:15:27.794Z" }, + { url = 
"https://files.pythonhosted.org/packages/8e/13/125093eadae863ce03c6ffdbae9929430d116a246ef69866dad94da3bfbc/psutil-7.2.2-cp36-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:fd04ef36b4a6d599bbdb225dd1d3f51e00105f6d48a28f006da7f9822f2606d8", size = 148972, upload-time = "2026-01-28T18:15:29.342Z" }, + { url = "https://files.pythonhosted.org/packages/04/78/0acd37ca84ce3ddffaa92ef0f571e073faa6d8ff1f0559ab1272188ea2be/psutil-7.2.2-cp36-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b58fabe35e80b264a4e3bb23e6b96f9e45a3df7fb7eed419ac0e5947c61e47cc", size = 148266, upload-time = "2026-01-28T18:15:31.597Z" }, + { url = "https://files.pythonhosted.org/packages/b4/90/e2159492b5426be0c1fef7acba807a03511f97c5f86b3caeda6ad92351a7/psutil-7.2.2-cp37-abi3-win_amd64.whl", hash = "sha256:eb7e81434c8d223ec4a219b5fc1c47d0417b12be7ea866e24fb5ad6e84b3d988", size = 137737, upload-time = "2026-01-28T18:15:33.849Z" }, + { url = "https://files.pythonhosted.org/packages/8c/c7/7bb2e321574b10df20cbde462a94e2b71d05f9bbda251ef27d104668306a/psutil-7.2.2-cp37-abi3-win_arm64.whl", hash = "sha256:8c233660f575a5a89e6d4cb65d9f938126312bca76d8fe087b947b3a1aaac9ee", size = 134617, upload-time = "2026-01-28T18:15:36.514Z" }, ] [[package]] name = "pyasn1" -version = "0.6.1" +version = "0.6.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/b6/6e630dff89739fcd427e3f72b3d905ce0acb85a45d4ec3e2678718a3487f/pyasn1-0.6.2.tar.gz", hash = "sha256:9b59a2b25ba7e4f8197db7686c09fb33e658b98339fadb826e9512629017833b", size = 146586, upload-time = "2026-01-16T18:04:18.534Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, + { url = "https://files.pythonhosted.org/packages/44/b5/a96872e5184f354da9c84ae119971a0a4c221fe9b27a4d94bd43f2596727/pyasn1-0.6.2-py3-none-any.whl", hash = "sha256:1eb26d860996a18e9b6ed05e7aae0e9fc21619fcee6af91cca9bad4fbea224bf", size = 83371, upload-time = "2026-01-16T18:04:17.174Z" }, ] [[package]] @@ -2115,11 +2125,11 @@ wheels = [ [[package]] name = "pycparser" -version = "2.23" +version = "3.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1b/7d/92392ff7815c21062bea51aa7b87d45576f649f16458d78b7cf94b9ab2e6/pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29", size = 103492, upload-time = "2026-01-21T14:26:51.89Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, + { url = "https://files.pythonhosted.org/packages/0c/c3/44f3fbbfa403ea2a7c779186dc20772604442dde72947e7d01069cbe98e3/pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992", size = 48172, upload-time = "2026-01-21T14:26:50.693Z" }, ] [[package]] @@ -2245,7 +2255,7 @@ wheels = [ [[package]] name = "pytest" -version = "9.0.1" 
+version = "9.0.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, @@ -2254,9 +2264,9 @@ dependencies = [ { name = "pluggy" }, { name = "pygments" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/07/56/f013048ac4bc4c1d9be45afd4ab209ea62822fb1598f40687e6bf45dcea4/pytest-9.0.1.tar.gz", hash = "sha256:3e9c069ea73583e255c3b21cf46b8d3c56f6e3a1a8f6da94ccb0fcf57b9d73c8", size = 1564125, upload-time = "2025-11-12T13:05:09.333Z" } +sdist = { url = "https://files.pythonhosted.org/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901, upload-time = "2025-12-06T21:30:51.014Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/0b/8b/6300fb80f858cda1c51ffa17075df5d846757081d11ab4aa35cef9e6258b/pytest-9.0.1-py3-none-any.whl", hash = "sha256:67be0030d194df2dfa7b556f2e56fb3c3315bd5c8822c6951162b92b32ce7dad", size = 373668, upload-time = "2025-11-12T13:05:07.379Z" }, + { url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" }, ] [[package]] @@ -2396,94 +2406,106 @@ wheels = [ [[package]] name = "regex" -version = "2025.11.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cc/a9/546676f25e573a4cf00fe8e119b78a37b6a8fe2dc95cda877b30889c9c45/regex-2025.11.3.tar.gz", hash = "sha256:1fedc720f9bb2494ce31a58a1631f9c82df6a09b49c19517ea5cc280b4541e01", size = 414669, upload-time = "2025-11-03T21:34:22.089Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/f7/90/4fb5056e5f03a7048abd2b11f598d464f0c167de4f2a51aa868c376b8c70/regex-2025.11.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:eadade04221641516fa25139273505a1c19f9bf97589a05bc4cfcd8b4a618031", size = 488081, upload-time = "2025-11-03T21:31:11.946Z" }, - { url = "https://files.pythonhosted.org/packages/85/23/63e481293fac8b069d84fba0299b6666df720d875110efd0338406b5d360/regex-2025.11.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:feff9e54ec0dd3833d659257f5c3f5322a12eee58ffa360984b716f8b92983f4", size = 290554, upload-time = "2025-11-03T21:31:13.387Z" }, - { url = "https://files.pythonhosted.org/packages/2b/9d/b101d0262ea293a0066b4522dfb722eb6a8785a8c3e084396a5f2c431a46/regex-2025.11.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3b30bc921d50365775c09a7ed446359e5c0179e9e2512beec4a60cbcef6ddd50", size = 288407, upload-time = "2025-11-03T21:31:14.809Z" }, - { url = "https://files.pythonhosted.org/packages/0c/64/79241c8209d5b7e00577ec9dca35cd493cc6be35b7d147eda367d6179f6d/regex-2025.11.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f99be08cfead2020c7ca6e396c13543baea32343b7a9a5780c462e323bd8872f", size = 793418, upload-time = "2025-11-03T21:31:16.556Z" }, - { url = "https://files.pythonhosted.org/packages/3d/e2/23cd5d3573901ce8f9757c92ca4db4d09600b865919b6d3e7f69f03b1afd/regex-2025.11.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6dd329a1b61c0ee95ba95385fb0c07ea0d3fe1a21e1349fa2bec272636217118", size = 860448, upload-time = "2025-11-03T21:31:18.12Z" }, - { url = "https://files.pythonhosted.org/packages/2a/4c/aecf31beeaa416d0ae4ecb852148d38db35391aac19c687b5d56aedf3a8b/regex-2025.11.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4c5238d32f3c5269d9e87be0cf096437b7622b6920f5eac4fd202468aaeb34d2", size = 907139, upload-time = 
"2025-11-03T21:31:20.753Z" }, - { url = "https://files.pythonhosted.org/packages/61/22/b8cb00df7d2b5e0875f60628594d44dba283e951b1ae17c12f99e332cc0a/regex-2025.11.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10483eefbfb0adb18ee9474498c9a32fcf4e594fbca0543bb94c48bac6183e2e", size = 800439, upload-time = "2025-11-03T21:31:22.069Z" }, - { url = "https://files.pythonhosted.org/packages/02/a8/c4b20330a5cdc7a8eb265f9ce593f389a6a88a0c5f280cf4d978f33966bc/regex-2025.11.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:78c2d02bb6e1da0720eedc0bad578049cad3f71050ef8cd065ecc87691bed2b0", size = 782965, upload-time = "2025-11-03T21:31:23.598Z" }, - { url = "https://files.pythonhosted.org/packages/b4/4c/ae3e52988ae74af4b04d2af32fee4e8077f26e51b62ec2d12d246876bea2/regex-2025.11.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e6b49cd2aad93a1790ce9cffb18964f6d3a4b0b3dbdbd5de094b65296fce6e58", size = 854398, upload-time = "2025-11-03T21:31:25.008Z" }, - { url = "https://files.pythonhosted.org/packages/06/d1/a8b9cf45874eda14b2e275157ce3b304c87e10fb38d9fc26a6e14eb18227/regex-2025.11.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:885b26aa3ee56433b630502dc3d36ba78d186a00cc535d3806e6bfd9ed3c70ab", size = 845897, upload-time = "2025-11-03T21:31:26.427Z" }, - { url = "https://files.pythonhosted.org/packages/ea/fe/1830eb0236be93d9b145e0bd8ab499f31602fe0999b1f19e99955aa8fe20/regex-2025.11.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ddd76a9f58e6a00f8772e72cff8ebcff78e022be95edf018766707c730593e1e", size = 788906, upload-time = "2025-11-03T21:31:28.078Z" }, - { url = "https://files.pythonhosted.org/packages/66/47/dc2577c1f95f188c1e13e2e69d8825a5ac582ac709942f8a03af42ed6e93/regex-2025.11.3-cp311-cp311-win32.whl", hash = "sha256:3e816cc9aac1cd3cc9a4ec4d860f06d40f994b5c7b4d03b93345f44e08cc68bf", size = 265812, upload-time = "2025-11-03T21:31:29.72Z" }, - { url = 
"https://files.pythonhosted.org/packages/50/1e/15f08b2f82a9bbb510621ec9042547b54d11e83cb620643ebb54e4eb7d71/regex-2025.11.3-cp311-cp311-win_amd64.whl", hash = "sha256:087511f5c8b7dfbe3a03f5d5ad0c2a33861b1fc387f21f6f60825a44865a385a", size = 277737, upload-time = "2025-11-03T21:31:31.422Z" }, - { url = "https://files.pythonhosted.org/packages/f4/fc/6500eb39f5f76c5e47a398df82e6b535a5e345f839581012a418b16f9cc3/regex-2025.11.3-cp311-cp311-win_arm64.whl", hash = "sha256:1ff0d190c7f68ae7769cd0313fe45820ba07ffebfddfaa89cc1eb70827ba0ddc", size = 270290, upload-time = "2025-11-03T21:31:33.041Z" }, - { url = "https://files.pythonhosted.org/packages/e8/74/18f04cb53e58e3fb107439699bd8375cf5a835eec81084e0bddbd122e4c2/regex-2025.11.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bc8ab71e2e31b16e40868a40a69007bc305e1109bd4658eb6cad007e0bf67c41", size = 489312, upload-time = "2025-11-03T21:31:34.343Z" }, - { url = "https://files.pythonhosted.org/packages/78/3f/37fcdd0d2b1e78909108a876580485ea37c91e1acf66d3bb8e736348f441/regex-2025.11.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:22b29dda7e1f7062a52359fca6e58e548e28c6686f205e780b02ad8ef710de36", size = 291256, upload-time = "2025-11-03T21:31:35.675Z" }, - { url = "https://files.pythonhosted.org/packages/bf/26/0a575f58eb23b7ebd67a45fccbc02ac030b737b896b7e7a909ffe43ffd6a/regex-2025.11.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3a91e4a29938bc1a082cc28fdea44be420bf2bebe2665343029723892eb073e1", size = 288921, upload-time = "2025-11-03T21:31:37.07Z" }, - { url = "https://files.pythonhosted.org/packages/ea/98/6a8dff667d1af907150432cf5abc05a17ccd32c72a3615410d5365ac167a/regex-2025.11.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08b884f4226602ad40c5d55f52bf91a9df30f513864e0054bad40c0e9cf1afb7", size = 798568, upload-time = "2025-11-03T21:31:38.784Z" }, - { url = 
"https://files.pythonhosted.org/packages/64/15/92c1db4fa4e12733dd5a526c2dd2b6edcbfe13257e135fc0f6c57f34c173/regex-2025.11.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3e0b11b2b2433d1c39c7c7a30e3f3d0aeeea44c2a8d0bae28f6b95f639927a69", size = 864165, upload-time = "2025-11-03T21:31:40.559Z" }, - { url = "https://files.pythonhosted.org/packages/f9/e7/3ad7da8cdee1ce66c7cd37ab5ab05c463a86ffeb52b1a25fe7bd9293b36c/regex-2025.11.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:87eb52a81ef58c7ba4d45c3ca74e12aa4b4e77816f72ca25258a85b3ea96cb48", size = 912182, upload-time = "2025-11-03T21:31:42.002Z" }, - { url = "https://files.pythonhosted.org/packages/84/bd/9ce9f629fcb714ffc2c3faf62b6766ecb7a585e1e885eb699bcf130a5209/regex-2025.11.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a12ab1f5c29b4e93db518f5e3872116b7e9b1646c9f9f426f777b50d44a09e8c", size = 803501, upload-time = "2025-11-03T21:31:43.815Z" }, - { url = "https://files.pythonhosted.org/packages/7c/0f/8dc2e4349d8e877283e6edd6c12bdcebc20f03744e86f197ab6e4492bf08/regex-2025.11.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7521684c8c7c4f6e88e35ec89680ee1aa8358d3f09d27dfbdf62c446f5d4c695", size = 787842, upload-time = "2025-11-03T21:31:45.353Z" }, - { url = "https://files.pythonhosted.org/packages/f9/73/cff02702960bc185164d5619c0c62a2f598a6abff6695d391b096237d4ab/regex-2025.11.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7fe6e5440584e94cc4b3f5f4d98a25e29ca12dccf8873679a635638349831b98", size = 858519, upload-time = "2025-11-03T21:31:46.814Z" }, - { url = "https://files.pythonhosted.org/packages/61/83/0e8d1ae71e15bc1dc36231c90b46ee35f9d52fab2e226b0e039e7ea9c10a/regex-2025.11.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8e026094aa12b43f4fd74576714e987803a315c76edb6b098b9809db5de58f74", size = 850611, upload-time = 
"2025-11-03T21:31:48.289Z" }, - { url = "https://files.pythonhosted.org/packages/c8/f5/70a5cdd781dcfaa12556f2955bf170cd603cb1c96a1827479f8faea2df97/regex-2025.11.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:435bbad13e57eb5606a68443af62bed3556de2f46deb9f7d4237bc2f1c9fb3a0", size = 789759, upload-time = "2025-11-03T21:31:49.759Z" }, - { url = "https://files.pythonhosted.org/packages/59/9b/7c29be7903c318488983e7d97abcf8ebd3830e4c956c4c540005fcfb0462/regex-2025.11.3-cp312-cp312-win32.whl", hash = "sha256:3839967cf4dc4b985e1570fd8d91078f0c519f30491c60f9ac42a8db039be204", size = 266194, upload-time = "2025-11-03T21:31:51.53Z" }, - { url = "https://files.pythonhosted.org/packages/1a/67/3b92df89f179d7c367be654ab5626ae311cb28f7d5c237b6bb976cd5fbbb/regex-2025.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:e721d1b46e25c481dc5ded6f4b3f66c897c58d2e8cfdf77bbced84339108b0b9", size = 277069, upload-time = "2025-11-03T21:31:53.151Z" }, - { url = "https://files.pythonhosted.org/packages/d7/55/85ba4c066fe5094d35b249c3ce8df0ba623cfd35afb22d6764f23a52a1c5/regex-2025.11.3-cp312-cp312-win_arm64.whl", hash = "sha256:64350685ff08b1d3a6fff33f45a9ca183dc1d58bbfe4981604e70ec9801bbc26", size = 270330, upload-time = "2025-11-03T21:31:54.514Z" }, - { url = "https://files.pythonhosted.org/packages/e1/a7/dda24ebd49da46a197436ad96378f17df30ceb40e52e859fc42cac45b850/regex-2025.11.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c1e448051717a334891f2b9a620fe36776ebf3dd8ec46a0b877c8ae69575feb4", size = 489081, upload-time = "2025-11-03T21:31:55.9Z" }, - { url = "https://files.pythonhosted.org/packages/19/22/af2dc751aacf88089836aa088a1a11c4f21a04707eb1b0478e8e8fb32847/regex-2025.11.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9b5aca4d5dfd7fbfbfbdaf44850fcc7709a01146a797536a8f84952e940cca76", size = 291123, upload-time = "2025-11-03T21:31:57.758Z" }, - { url = 
"https://files.pythonhosted.org/packages/a3/88/1a3ea5672f4b0a84802ee9891b86743438e7c04eb0b8f8c4e16a42375327/regex-2025.11.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:04d2765516395cf7dda331a244a3282c0f5ae96075f728629287dfa6f76ba70a", size = 288814, upload-time = "2025-11-03T21:32:01.12Z" }, - { url = "https://files.pythonhosted.org/packages/fb/8c/f5987895bf42b8ddeea1b315c9fedcfe07cadee28b9c98cf50d00adcb14d/regex-2025.11.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d9903ca42bfeec4cebedba8022a7c97ad2aab22e09573ce9976ba01b65e4361", size = 798592, upload-time = "2025-11-03T21:32:03.006Z" }, - { url = "https://files.pythonhosted.org/packages/99/2a/6591ebeede78203fa77ee46a1c36649e02df9eaa77a033d1ccdf2fcd5d4e/regex-2025.11.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:639431bdc89d6429f6721625e8129413980ccd62e9d3f496be618a41d205f160", size = 864122, upload-time = "2025-11-03T21:32:04.553Z" }, - { url = "https://files.pythonhosted.org/packages/94/d6/be32a87cf28cf8ed064ff281cfbd49aefd90242a83e4b08b5a86b38e8eb4/regex-2025.11.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f117efad42068f9715677c8523ed2be1518116d1c49b1dd17987716695181efe", size = 912272, upload-time = "2025-11-03T21:32:06.148Z" }, - { url = "https://files.pythonhosted.org/packages/62/11/9bcef2d1445665b180ac7f230406ad80671f0fc2a6ffb93493b5dd8cd64c/regex-2025.11.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4aecb6f461316adf9f1f0f6a4a1a3d79e045f9b71ec76055a791affa3b285850", size = 803497, upload-time = "2025-11-03T21:32:08.162Z" }, - { url = "https://files.pythonhosted.org/packages/e5/a7/da0dc273d57f560399aa16d8a68ae7f9b57679476fc7ace46501d455fe84/regex-2025.11.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3b3a5f320136873cc5561098dfab677eea139521cb9a9e8db98b7e64aef44cbc", size = 
787892, upload-time = "2025-11-03T21:32:09.769Z" }, - { url = "https://files.pythonhosted.org/packages/da/4b/732a0c5a9736a0b8d6d720d4945a2f1e6f38f87f48f3173559f53e8d5d82/regex-2025.11.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:75fa6f0056e7efb1f42a1c34e58be24072cb9e61a601340cc1196ae92326a4f9", size = 858462, upload-time = "2025-11-03T21:32:11.769Z" }, - { url = "https://files.pythonhosted.org/packages/0c/f5/a2a03df27dc4c2d0c769220f5110ba8c4084b0bfa9ab0f9b4fcfa3d2b0fc/regex-2025.11.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:dbe6095001465294f13f1adcd3311e50dd84e5a71525f20a10bd16689c61ce0b", size = 850528, upload-time = "2025-11-03T21:32:13.906Z" }, - { url = "https://files.pythonhosted.org/packages/d6/09/e1cd5bee3841c7f6eb37d95ca91cdee7100b8f88b81e41c2ef426910891a/regex-2025.11.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:454d9b4ae7881afbc25015b8627c16d88a597479b9dea82b8c6e7e2e07240dc7", size = 789866, upload-time = "2025-11-03T21:32:15.748Z" }, - { url = "https://files.pythonhosted.org/packages/eb/51/702f5ea74e2a9c13d855a6a85b7f80c30f9e72a95493260193c07f3f8d74/regex-2025.11.3-cp313-cp313-win32.whl", hash = "sha256:28ba4d69171fc6e9896337d4fc63a43660002b7da53fc15ac992abcf3410917c", size = 266189, upload-time = "2025-11-03T21:32:17.493Z" }, - { url = "https://files.pythonhosted.org/packages/8b/00/6e29bb314e271a743170e53649db0fdb8e8ff0b64b4f425f5602f4eb9014/regex-2025.11.3-cp313-cp313-win_amd64.whl", hash = "sha256:bac4200befe50c670c405dc33af26dad5a3b6b255dd6c000d92fe4629f9ed6a5", size = 277054, upload-time = "2025-11-03T21:32:19.042Z" }, - { url = "https://files.pythonhosted.org/packages/25/f1/b156ff9f2ec9ac441710764dda95e4edaf5f36aca48246d1eea3f1fd96ec/regex-2025.11.3-cp313-cp313-win_arm64.whl", hash = "sha256:2292cd5a90dab247f9abe892ac584cb24f0f54680c73fcb4a7493c66c2bf2467", size = 270325, upload-time = "2025-11-03T21:32:21.338Z" }, - { url = 
"https://files.pythonhosted.org/packages/20/28/fd0c63357caefe5680b8ea052131acbd7f456893b69cc2a90cc3e0dc90d4/regex-2025.11.3-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:1eb1ebf6822b756c723e09f5186473d93236c06c579d2cc0671a722d2ab14281", size = 491984, upload-time = "2025-11-03T21:32:23.466Z" }, - { url = "https://files.pythonhosted.org/packages/df/ec/7014c15626ab46b902b3bcc4b28a7bae46d8f281fc7ea9c95e22fcaaa917/regex-2025.11.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:1e00ec2970aab10dc5db34af535f21fcf32b4a31d99e34963419636e2f85ae39", size = 292673, upload-time = "2025-11-03T21:32:25.034Z" }, - { url = "https://files.pythonhosted.org/packages/23/ab/3b952ff7239f20d05f1f99e9e20188513905f218c81d52fb5e78d2bf7634/regex-2025.11.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a4cb042b615245d5ff9b3794f56be4138b5adc35a4166014d31d1814744148c7", size = 291029, upload-time = "2025-11-03T21:32:26.528Z" }, - { url = "https://files.pythonhosted.org/packages/21/7e/3dc2749fc684f455f162dcafb8a187b559e2614f3826877d3844a131f37b/regex-2025.11.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44f264d4bf02f3176467d90b294d59bf1db9fe53c141ff772f27a8b456b2a9ed", size = 807437, upload-time = "2025-11-03T21:32:28.363Z" }, - { url = "https://files.pythonhosted.org/packages/1b/0b/d529a85ab349c6a25d1ca783235b6e3eedf187247eab536797021f7126c6/regex-2025.11.3-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7be0277469bf3bd7a34a9c57c1b6a724532a0d235cd0dc4e7f4316f982c28b19", size = 873368, upload-time = "2025-11-03T21:32:30.4Z" }, - { url = "https://files.pythonhosted.org/packages/7d/18/2d868155f8c9e3e9d8f9e10c64e9a9f496bb8f7e037a88a8bed26b435af6/regex-2025.11.3-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0d31e08426ff4b5b650f68839f5af51a92a5b51abd8554a60c2fbc7c71f25d0b", size = 914921, upload-time = 
"2025-11-03T21:32:32.123Z" }, - { url = "https://files.pythonhosted.org/packages/2d/71/9d72ff0f354fa783fe2ba913c8734c3b433b86406117a8db4ea2bf1c7a2f/regex-2025.11.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e43586ce5bd28f9f285a6e729466841368c4a0353f6fd08d4ce4630843d3648a", size = 812708, upload-time = "2025-11-03T21:32:34.305Z" }, - { url = "https://files.pythonhosted.org/packages/e7/19/ce4bf7f5575c97f82b6e804ffb5c4e940c62609ab2a0d9538d47a7fdf7d4/regex-2025.11.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:0f9397d561a4c16829d4e6ff75202c1c08b68a3bdbfe29dbfcdb31c9830907c6", size = 795472, upload-time = "2025-11-03T21:32:36.364Z" }, - { url = "https://files.pythonhosted.org/packages/03/86/fd1063a176ffb7b2315f9a1b08d17b18118b28d9df163132615b835a26ee/regex-2025.11.3-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:dd16e78eb18ffdb25ee33a0682d17912e8cc8a770e885aeee95020046128f1ce", size = 868341, upload-time = "2025-11-03T21:32:38.042Z" }, - { url = "https://files.pythonhosted.org/packages/12/43/103fb2e9811205e7386366501bc866a164a0430c79dd59eac886a2822950/regex-2025.11.3-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:ffcca5b9efe948ba0661e9df0fa50d2bc4b097c70b9810212d6b62f05d83b2dd", size = 854666, upload-time = "2025-11-03T21:32:40.079Z" }, - { url = "https://files.pythonhosted.org/packages/7d/22/e392e53f3869b75804762c7c848bd2dd2abf2b70fb0e526f58724638bd35/regex-2025.11.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c56b4d162ca2b43318ac671c65bd4d563e841a694ac70e1a976ac38fcf4ca1d2", size = 799473, upload-time = "2025-11-03T21:32:42.148Z" }, - { url = "https://files.pythonhosted.org/packages/4f/f9/8bd6b656592f925b6845fcbb4d57603a3ac2fb2373344ffa1ed70aa6820a/regex-2025.11.3-cp313-cp313t-win32.whl", hash = "sha256:9ddc42e68114e161e51e272f667d640f97e84a2b9ef14b7477c53aac20c2d59a", size = 268792, upload-time = "2025-11-03T21:32:44.13Z" }, - { url = 
"https://files.pythonhosted.org/packages/e5/87/0e7d603467775ff65cd2aeabf1b5b50cc1c3708556a8b849a2fa4dd1542b/regex-2025.11.3-cp313-cp313t-win_amd64.whl", hash = "sha256:7a7c7fdf755032ffdd72c77e3d8096bdcb0eb92e89e17571a196f03d88b11b3c", size = 280214, upload-time = "2025-11-03T21:32:45.853Z" }, - { url = "https://files.pythonhosted.org/packages/8d/d0/2afc6f8e94e2b64bfb738a7c2b6387ac1699f09f032d363ed9447fd2bb57/regex-2025.11.3-cp313-cp313t-win_arm64.whl", hash = "sha256:df9eb838c44f570283712e7cff14c16329a9f0fb19ca492d21d4b7528ee6821e", size = 271469, upload-time = "2025-11-03T21:32:48.026Z" }, - { url = "https://files.pythonhosted.org/packages/31/e9/f6e13de7e0983837f7b6d238ad9458800a874bf37c264f7923e63409944c/regex-2025.11.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:9697a52e57576c83139d7c6f213d64485d3df5bf84807c35fa409e6c970801c6", size = 489089, upload-time = "2025-11-03T21:32:50.027Z" }, - { url = "https://files.pythonhosted.org/packages/a3/5c/261f4a262f1fa65141c1b74b255988bd2fa020cc599e53b080667d591cfc/regex-2025.11.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e18bc3f73bd41243c9b38a6d9f2366cd0e0137a9aebe2d8ff76c5b67d4c0a3f4", size = 291059, upload-time = "2025-11-03T21:32:51.682Z" }, - { url = "https://files.pythonhosted.org/packages/8e/57/f14eeb7f072b0e9a5a090d1712741fd8f214ec193dba773cf5410108bb7d/regex-2025.11.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:61a08bcb0ec14ff4e0ed2044aad948d0659604f824cbd50b55e30b0ec6f09c73", size = 288900, upload-time = "2025-11-03T21:32:53.569Z" }, - { url = "https://files.pythonhosted.org/packages/3c/6b/1d650c45e99a9b327586739d926a1cd4e94666b1bd4af90428b36af66dc7/regex-2025.11.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c9c30003b9347c24bcc210958c5d167b9e4f9be786cb380a7d32f14f9b84674f", size = 799010, upload-time = "2025-11-03T21:32:55.222Z" }, - { url = 
"https://files.pythonhosted.org/packages/99/ee/d66dcbc6b628ce4e3f7f0cbbb84603aa2fc0ffc878babc857726b8aab2e9/regex-2025.11.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4e1e592789704459900728d88d41a46fe3969b82ab62945560a31732ffc19a6d", size = 864893, upload-time = "2025-11-03T21:32:57.239Z" }, - { url = "https://files.pythonhosted.org/packages/bf/2d/f238229f1caba7ac87a6c4153d79947fb0261415827ae0f77c304260c7d3/regex-2025.11.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6538241f45eb5a25aa575dbba1069ad786f68a4f2773a29a2bd3dd1f9de787be", size = 911522, upload-time = "2025-11-03T21:32:59.274Z" }, - { url = "https://files.pythonhosted.org/packages/bd/3d/22a4eaba214a917c80e04f6025d26143690f0419511e0116508e24b11c9b/regex-2025.11.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bce22519c989bb72a7e6b36a199384c53db7722fe669ba891da75907fe3587db", size = 803272, upload-time = "2025-11-03T21:33:01.393Z" }, - { url = "https://files.pythonhosted.org/packages/84/b1/03188f634a409353a84b5ef49754b97dbcc0c0f6fd6c8ede505a8960a0a4/regex-2025.11.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:66d559b21d3640203ab9075797a55165d79017520685fb407b9234d72ab63c62", size = 787958, upload-time = "2025-11-03T21:33:03.379Z" }, - { url = "https://files.pythonhosted.org/packages/99/6a/27d072f7fbf6fadd59c64d210305e1ff865cc3b78b526fd147db768c553b/regex-2025.11.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:669dcfb2e38f9e8c69507bace46f4889e3abbfd9b0c29719202883c0a603598f", size = 859289, upload-time = "2025-11-03T21:33:05.374Z" }, - { url = "https://files.pythonhosted.org/packages/9a/70/1b3878f648e0b6abe023172dacb02157e685564853cc363d9961bcccde4e/regex-2025.11.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:32f74f35ff0f25a5021373ac61442edcb150731fbaa28286bbc8bb1582c89d02", size = 850026, upload-time = 
"2025-11-03T21:33:07.131Z" }, - { url = "https://files.pythonhosted.org/packages/dd/d5/68e25559b526b8baab8e66839304ede68ff6727237a47727d240006bd0ff/regex-2025.11.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e6c7a21dffba883234baefe91bc3388e629779582038f75d2a5be918e250f0ed", size = 789499, upload-time = "2025-11-03T21:33:09.141Z" }, - { url = "https://files.pythonhosted.org/packages/fc/df/43971264857140a350910d4e33df725e8c94dd9dee8d2e4729fa0d63d49e/regex-2025.11.3-cp314-cp314-win32.whl", hash = "sha256:795ea137b1d809eb6836b43748b12634291c0ed55ad50a7d72d21edf1cd565c4", size = 271604, upload-time = "2025-11-03T21:33:10.9Z" }, - { url = "https://files.pythonhosted.org/packages/01/6f/9711b57dc6894a55faf80a4c1b5aa4f8649805cb9c7aef46f7d27e2b9206/regex-2025.11.3-cp314-cp314-win_amd64.whl", hash = "sha256:9f95fbaa0ee1610ec0fc6b26668e9917a582ba80c52cc6d9ada15e30aa9ab9ad", size = 280320, upload-time = "2025-11-03T21:33:12.572Z" }, - { url = "https://files.pythonhosted.org/packages/f1/7e/f6eaa207d4377481f5e1775cdeb5a443b5a59b392d0065f3417d31d80f87/regex-2025.11.3-cp314-cp314-win_arm64.whl", hash = "sha256:dfec44d532be4c07088c3de2876130ff0fbeeacaa89a137decbbb5f665855a0f", size = 273372, upload-time = "2025-11-03T21:33:14.219Z" }, - { url = "https://files.pythonhosted.org/packages/c3/06/49b198550ee0f5e4184271cee87ba4dfd9692c91ec55289e6282f0f86ccf/regex-2025.11.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ba0d8a5d7f04f73ee7d01d974d47c5834f8a1b0224390e4fe7c12a3a92a78ecc", size = 491985, upload-time = "2025-11-03T21:33:16.555Z" }, - { url = "https://files.pythonhosted.org/packages/ce/bf/abdafade008f0b1c9da10d934034cb670432d6cf6cbe38bbb53a1cfd6cf8/regex-2025.11.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:442d86cf1cfe4faabf97db7d901ef58347efd004934da045c745e7b5bd57ac49", size = 292669, upload-time = "2025-11-03T21:33:18.32Z" }, - { url = 
"https://files.pythonhosted.org/packages/f9/ef/0c357bb8edbd2ad8e273fcb9e1761bc37b8acbc6e1be050bebd6475f19c1/regex-2025.11.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:fd0a5e563c756de210bb964789b5abe4f114dacae9104a47e1a649b910361536", size = 291030, upload-time = "2025-11-03T21:33:20.048Z" }, - { url = "https://files.pythonhosted.org/packages/79/06/edbb67257596649b8fb088d6aeacbcb248ac195714b18a65e018bf4c0b50/regex-2025.11.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bf3490bcbb985a1ae97b2ce9ad1c0f06a852d5b19dde9b07bdf25bf224248c95", size = 807674, upload-time = "2025-11-03T21:33:21.797Z" }, - { url = "https://files.pythonhosted.org/packages/f4/d9/ad4deccfce0ea336296bd087f1a191543bb99ee1c53093dcd4c64d951d00/regex-2025.11.3-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3809988f0a8b8c9dcc0f92478d6501fac7200b9ec56aecf0ec21f4a2ec4b6009", size = 873451, upload-time = "2025-11-03T21:33:23.741Z" }, - { url = "https://files.pythonhosted.org/packages/13/75/a55a4724c56ef13e3e04acaab29df26582f6978c000ac9cd6810ad1f341f/regex-2025.11.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f4ff94e58e84aedb9c9fce66d4ef9f27a190285b451420f297c9a09f2b9abee9", size = 914980, upload-time = "2025-11-03T21:33:25.999Z" }, - { url = "https://files.pythonhosted.org/packages/67/1e/a1657ee15bd9116f70d4a530c736983eed997b361e20ecd8f5ca3759d5c5/regex-2025.11.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7eb542fd347ce61e1321b0a6b945d5701528dca0cd9759c2e3bb8bd57e47964d", size = 812852, upload-time = "2025-11-03T21:33:27.852Z" }, - { url = "https://files.pythonhosted.org/packages/b8/6f/f7516dde5506a588a561d296b2d0044839de06035bb486b326065b4c101e/regex-2025.11.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d6c2d5919075a1f2e413c00b056ea0c2f065b3f5fe83c3d07d325ab92dce51d6", 
size = 795566, upload-time = "2025-11-03T21:33:32.364Z" }, - { url = "https://files.pythonhosted.org/packages/d9/dd/3d10b9e170cc16fb34cb2cef91513cf3df65f440b3366030631b2984a264/regex-2025.11.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:3f8bf11a4827cc7ce5a53d4ef6cddd5ad25595d3c1435ef08f76825851343154", size = 868463, upload-time = "2025-11-03T21:33:34.459Z" }, - { url = "https://files.pythonhosted.org/packages/f5/8e/935e6beff1695aa9085ff83195daccd72acc82c81793df480f34569330de/regex-2025.11.3-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:22c12d837298651e5550ac1d964e4ff57c3f56965fc1812c90c9fb2028eaf267", size = 854694, upload-time = "2025-11-03T21:33:36.793Z" }, - { url = "https://files.pythonhosted.org/packages/92/12/10650181a040978b2f5720a6a74d44f841371a3d984c2083fc1752e4acf6/regex-2025.11.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:62ba394a3dda9ad41c7c780f60f6e4a70988741415ae96f6d1bf6c239cf01379", size = 799691, upload-time = "2025-11-03T21:33:39.079Z" }, - { url = "https://files.pythonhosted.org/packages/67/90/8f37138181c9a7690e7e4cb388debbd389342db3c7381d636d2875940752/regex-2025.11.3-cp314-cp314t-win32.whl", hash = "sha256:4bf146dca15cdd53224a1bf46d628bd7590e4a07fbb69e720d561aea43a32b38", size = 274583, upload-time = "2025-11-03T21:33:41.302Z" }, - { url = "https://files.pythonhosted.org/packages/8f/cd/867f5ec442d56beb56f5f854f40abcfc75e11d10b11fdb1869dd39c63aaf/regex-2025.11.3-cp314-cp314t-win_amd64.whl", hash = "sha256:adad1a1bcf1c9e76346e091d22d23ac54ef28e1365117d99521631078dfec9de", size = 284286, upload-time = "2025-11-03T21:33:43.324Z" }, - { url = "https://files.pythonhosted.org/packages/20/31/32c0c4610cbc070362bf1d2e4ea86d1ea29014d400a6d6c2486fcfd57766/regex-2025.11.3-cp314-cp314t-win_arm64.whl", hash = "sha256:c54f768482cef41e219720013cd05933b6f971d9562544d691c68699bf2b6801", size = 274741, upload-time = "2025-11-03T21:33:45.557Z" }, +version = "2026.1.15" +source = { registry = "https://pypi.org/simple" } +sdist = { 
url = "https://files.pythonhosted.org/packages/0b/86/07d5056945f9ec4590b518171c4254a5925832eb727b56d3c38a7476f316/regex-2026.1.15.tar.gz", hash = "sha256:164759aa25575cbc0651bef59a0b18353e54300d79ace8084c818ad8ac72b7d5", size = 414811, upload-time = "2026-01-14T23:18:02.775Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/c9/0c80c96eab96948363d270143138d671d5731c3a692b417629bf3492a9d6/regex-2026.1.15-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1ae6020fb311f68d753b7efa9d4b9a5d47a5d6466ea0d5e3b5a471a960ea6e4a", size = 488168, upload-time = "2026-01-14T23:14:16.129Z" }, + { url = "https://files.pythonhosted.org/packages/17/f0/271c92f5389a552494c429e5cc38d76d1322eb142fb5db3c8ccc47751468/regex-2026.1.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:eddf73f41225942c1f994914742afa53dc0d01a6e20fe14b878a1b1edc74151f", size = 290636, upload-time = "2026-01-14T23:14:17.715Z" }, + { url = "https://files.pythonhosted.org/packages/a0/f9/5f1fd077d106ca5655a0f9ff8f25a1ab55b92128b5713a91ed7134ff688e/regex-2026.1.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e8cd52557603f5c66a548f69421310886b28b7066853089e1a71ee710e1cdc1", size = 288496, upload-time = "2026-01-14T23:14:19.326Z" }, + { url = "https://files.pythonhosted.org/packages/b5/e1/8f43b03a4968c748858ec77f746c286d81f896c2e437ccf050ebc5d3128c/regex-2026.1.15-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5170907244b14303edc5978f522f16c974f32d3aa92109fabc2af52411c9433b", size = 793503, upload-time = "2026-01-14T23:14:20.922Z" }, + { url = "https://files.pythonhosted.org/packages/8d/4e/a39a5e8edc5377a46a7c875c2f9a626ed3338cb3bb06931be461c3e1a34a/regex-2026.1.15-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2748c1ec0663580b4510bd89941a31560b4b439a0b428b49472a3d9944d11cd8", size = 860535, upload-time = "2026-01-14T23:14:22.405Z" }, + { url = 
"https://files.pythonhosted.org/packages/dc/1c/9dce667a32a9477f7a2869c1c767dc00727284a9fa3ff5c09a5c6c03575e/regex-2026.1.15-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2f2775843ca49360508d080eaa87f94fa248e2c946bbcd963bb3aae14f333413", size = 907225, upload-time = "2026-01-14T23:14:23.897Z" }, + { url = "https://files.pythonhosted.org/packages/a4/3c/87ca0a02736d16b6262921425e84b48984e77d8e4e572c9072ce96e66c30/regex-2026.1.15-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d9ea2604370efc9a174c1b5dcc81784fb040044232150f7f33756049edfc9026", size = 800526, upload-time = "2026-01-14T23:14:26.039Z" }, + { url = "https://files.pythonhosted.org/packages/4b/ff/647d5715aeea7c87bdcbd2f578f47b415f55c24e361e639fe8c0cc88878f/regex-2026.1.15-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0dcd31594264029b57bf16f37fd7248a70b3b764ed9e0839a8f271b2d22c0785", size = 773446, upload-time = "2026-01-14T23:14:28.109Z" }, + { url = "https://files.pythonhosted.org/packages/af/89/bf22cac25cb4ba0fe6bff52ebedbb65b77a179052a9d6037136ae93f42f4/regex-2026.1.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c08c1f3e34338256732bd6938747daa3c0d5b251e04b6e43b5813e94d503076e", size = 783051, upload-time = "2026-01-14T23:14:29.929Z" }, + { url = "https://files.pythonhosted.org/packages/1e/f4/6ed03e71dca6348a5188363a34f5e26ffd5db1404780288ff0d79513bce4/regex-2026.1.15-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e43a55f378df1e7a4fa3547c88d9a5a9b7113f653a66821bcea4718fe6c58763", size = 854485, upload-time = "2026-01-14T23:14:31.366Z" }, + { url = "https://files.pythonhosted.org/packages/d9/9a/8e8560bd78caded8eb137e3e47612430a05b9a772caf60876435192d670a/regex-2026.1.15-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:f82110ab962a541737bd0ce87978d4c658f06e7591ba899192e2712a517badbb", size = 762195, upload-time = "2026-01-14T23:14:32.802Z" }, + { url = 
"https://files.pythonhosted.org/packages/38/6b/61fc710f9aa8dfcd764fe27d37edfaa023b1a23305a0d84fccd5adb346ea/regex-2026.1.15-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:27618391db7bdaf87ac6c92b31e8f0dfb83a9de0075855152b720140bda177a2", size = 845986, upload-time = "2026-01-14T23:14:34.898Z" }, + { url = "https://files.pythonhosted.org/packages/fd/2e/fbee4cb93f9d686901a7ca8d94285b80405e8c34fe4107f63ffcbfb56379/regex-2026.1.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bfb0d6be01fbae8d6655c8ca21b3b72458606c4aec9bbc932db758d47aba6db1", size = 788992, upload-time = "2026-01-14T23:14:37.116Z" }, + { url = "https://files.pythonhosted.org/packages/ed/14/3076348f3f586de64b1ab75a3fbabdaab7684af7f308ad43be7ef1849e55/regex-2026.1.15-cp311-cp311-win32.whl", hash = "sha256:b10e42a6de0e32559a92f2f8dc908478cc0fa02838d7dbe764c44dca3fa13569", size = 265893, upload-time = "2026-01-14T23:14:38.426Z" }, + { url = "https://files.pythonhosted.org/packages/0f/19/772cf8b5fc803f5c89ba85d8b1870a1ca580dc482aa030383a9289c82e44/regex-2026.1.15-cp311-cp311-win_amd64.whl", hash = "sha256:e9bf3f0bbdb56633c07d7116ae60a576f846efdd86a8848f8d62b749e1209ca7", size = 277840, upload-time = "2026-01-14T23:14:39.785Z" }, + { url = "https://files.pythonhosted.org/packages/78/84/d05f61142709474da3c0853222d91086d3e1372bcdab516c6fd8d80f3297/regex-2026.1.15-cp311-cp311-win_arm64.whl", hash = "sha256:41aef6f953283291c4e4e6850607bd71502be67779586a61472beacb315c97ec", size = 270374, upload-time = "2026-01-14T23:14:41.592Z" }, + { url = "https://files.pythonhosted.org/packages/92/81/10d8cf43c807d0326efe874c1b79f22bfb0fb226027b0b19ebc26d301408/regex-2026.1.15-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:4c8fcc5793dde01641a35905d6731ee1548f02b956815f8f1cab89e515a5bdf1", size = 489398, upload-time = "2026-01-14T23:14:43.741Z" }, + { url = 
"https://files.pythonhosted.org/packages/90/b0/7c2a74e74ef2a7c32de724658a69a862880e3e4155cba992ba04d1c70400/regex-2026.1.15-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bfd876041a956e6a90ad7cdb3f6a630c07d491280bfeed4544053cd434901681", size = 291339, upload-time = "2026-01-14T23:14:45.183Z" }, + { url = "https://files.pythonhosted.org/packages/19/4d/16d0773d0c818417f4cc20aa0da90064b966d22cd62a8c46765b5bd2d643/regex-2026.1.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9250d087bc92b7d4899ccd5539a1b2334e44eee85d848c4c1aef8e221d3f8c8f", size = 289003, upload-time = "2026-01-14T23:14:47.25Z" }, + { url = "https://files.pythonhosted.org/packages/c6/e4/1fc4599450c9f0863d9406e944592d968b8d6dfd0d552a7d569e43bceada/regex-2026.1.15-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c8a154cf6537ebbc110e24dabe53095e714245c272da9c1be05734bdad4a61aa", size = 798656, upload-time = "2026-01-14T23:14:48.77Z" }, + { url = "https://files.pythonhosted.org/packages/b2/e6/59650d73a73fa8a60b3a590545bfcf1172b4384a7df2e7fe7b9aab4e2da9/regex-2026.1.15-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8050ba2e3ea1d8731a549e83c18d2f0999fbc99a5f6bd06b4c91449f55291804", size = 864252, upload-time = "2026-01-14T23:14:50.528Z" }, + { url = "https://files.pythonhosted.org/packages/6e/ab/1d0f4d50a1638849a97d731364c9a80fa304fec46325e48330c170ee8e80/regex-2026.1.15-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0bf065240704cb8951cc04972cf107063917022511273e0969bdb34fc173456c", size = 912268, upload-time = "2026-01-14T23:14:52.952Z" }, + { url = "https://files.pythonhosted.org/packages/dd/df/0d722c030c82faa1d331d1921ee268a4e8fb55ca8b9042c9341c352f17fa/regex-2026.1.15-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c32bef3e7aeee75746748643667668ef941d28b003bfc89994ecf09a10f7a1b5", size = 
803589, upload-time = "2026-01-14T23:14:55.182Z" }, + { url = "https://files.pythonhosted.org/packages/66/23/33289beba7ccb8b805c6610a8913d0131f834928afc555b241caabd422a9/regex-2026.1.15-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d5eaa4a4c5b1906bd0d2508d68927f15b81821f85092e06f1a34a4254b0e1af3", size = 775700, upload-time = "2026-01-14T23:14:56.707Z" }, + { url = "https://files.pythonhosted.org/packages/e7/65/bf3a42fa6897a0d3afa81acb25c42f4b71c274f698ceabd75523259f6688/regex-2026.1.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:86c1077a3cc60d453d4084d5b9649065f3bf1184e22992bd322e1f081d3117fb", size = 787928, upload-time = "2026-01-14T23:14:58.312Z" }, + { url = "https://files.pythonhosted.org/packages/f4/f5/13bf65864fc314f68cdd6d8ca94adcab064d4d39dbd0b10fef29a9da48fc/regex-2026.1.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:2b091aefc05c78d286657cd4db95f2e6313375ff65dcf085e42e4c04d9c8d410", size = 858607, upload-time = "2026-01-14T23:15:00.657Z" }, + { url = "https://files.pythonhosted.org/packages/a3/31/040e589834d7a439ee43fb0e1e902bc81bd58a5ba81acffe586bb3321d35/regex-2026.1.15-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:57e7d17f59f9ebfa9667e6e5a1c0127b96b87cb9cede8335482451ed00788ba4", size = 763729, upload-time = "2026-01-14T23:15:02.248Z" }, + { url = "https://files.pythonhosted.org/packages/9b/84/6921e8129687a427edf25a34a5594b588b6d88f491320b9de5b6339a4fcb/regex-2026.1.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:c6c4dcdfff2c08509faa15d36ba7e5ef5fcfab25f1e8f85a0c8f45bc3a30725d", size = 850697, upload-time = "2026-01-14T23:15:03.878Z" }, + { url = "https://files.pythonhosted.org/packages/8a/87/3d06143d4b128f4229158f2de5de6c8f2485170c7221e61bf381313314b2/regex-2026.1.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cf8ff04c642716a7f2048713ddc6278c5fd41faa3b9cab12607c7abecd012c22", size = 789849, upload-time = "2026-01-14T23:15:06.102Z" }, + { url = 
"https://files.pythonhosted.org/packages/77/69/c50a63842b6bd48850ebc7ab22d46e7a2a32d824ad6c605b218441814639/regex-2026.1.15-cp312-cp312-win32.whl", hash = "sha256:82345326b1d8d56afbe41d881fdf62f1926d7264b2fc1537f99ae5da9aad7913", size = 266279, upload-time = "2026-01-14T23:15:07.678Z" }, + { url = "https://files.pythonhosted.org/packages/f2/36/39d0b29d087e2b11fd8191e15e81cce1b635fcc845297c67f11d0d19274d/regex-2026.1.15-cp312-cp312-win_amd64.whl", hash = "sha256:4def140aa6156bc64ee9912383d4038f3fdd18fee03a6f222abd4de6357ce42a", size = 277166, upload-time = "2026-01-14T23:15:09.257Z" }, + { url = "https://files.pythonhosted.org/packages/28/32/5b8e476a12262748851fa8ab1b0be540360692325975b094e594dfebbb52/regex-2026.1.15-cp312-cp312-win_arm64.whl", hash = "sha256:c6c565d9a6e1a8d783c1948937ffc377dd5771e83bd56de8317c450a954d2056", size = 270415, upload-time = "2026-01-14T23:15:10.743Z" }, + { url = "https://files.pythonhosted.org/packages/f8/2e/6870bb16e982669b674cce3ee9ff2d1d46ab80528ee6bcc20fb2292efb60/regex-2026.1.15-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e69d0deeb977ffe7ed3d2e4439360089f9c3f217ada608f0f88ebd67afb6385e", size = 489164, upload-time = "2026-01-14T23:15:13.962Z" }, + { url = "https://files.pythonhosted.org/packages/dc/67/9774542e203849b0286badf67199970a44ebdb0cc5fb739f06e47ada72f8/regex-2026.1.15-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3601ffb5375de85a16f407854d11cca8fe3f5febbe3ac78fb2866bb220c74d10", size = 291218, upload-time = "2026-01-14T23:15:15.647Z" }, + { url = "https://files.pythonhosted.org/packages/b2/87/b0cda79f22b8dee05f774922a214da109f9a4c0eca5da2c9d72d77ea062c/regex-2026.1.15-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4c5ef43b5c2d4114eb8ea424bb8c9cec01d5d17f242af88b2448f5ee81caadbc", size = 288895, upload-time = "2026-01-14T23:15:17.788Z" }, + { url = 
"https://files.pythonhosted.org/packages/3b/6a/0041f0a2170d32be01ab981d6346c83a8934277d82c780d60b127331f264/regex-2026.1.15-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:968c14d4f03e10b2fd960f1d5168c1f0ac969381d3c1fcc973bc45fb06346599", size = 798680, upload-time = "2026-01-14T23:15:19.342Z" }, + { url = "https://files.pythonhosted.org/packages/58/de/30e1cfcdbe3e891324aa7568b7c968771f82190df5524fabc1138cb2d45a/regex-2026.1.15-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:56a5595d0f892f214609c9f76b41b7428bed439d98dc961efafdd1354d42baae", size = 864210, upload-time = "2026-01-14T23:15:22.005Z" }, + { url = "https://files.pythonhosted.org/packages/64/44/4db2f5c5ca0ccd40ff052ae7b1e9731352fcdad946c2b812285a7505ca75/regex-2026.1.15-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0bf650f26087363434c4e560011f8e4e738f6f3e029b85d4904c50135b86cfa5", size = 912358, upload-time = "2026-01-14T23:15:24.569Z" }, + { url = "https://files.pythonhosted.org/packages/79/b6/e6a5665d43a7c42467138c8a2549be432bad22cbd206f5ec87162de74bd7/regex-2026.1.15-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18388a62989c72ac24de75f1449d0fb0b04dfccd0a1a7c1c43af5eb503d890f6", size = 803583, upload-time = "2026-01-14T23:15:26.526Z" }, + { url = "https://files.pythonhosted.org/packages/e7/53/7cd478222169d85d74d7437e74750005e993f52f335f7c04ff7adfda3310/regex-2026.1.15-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:6d220a2517f5893f55daac983bfa9fe998a7dbcaee4f5d27a88500f8b7873788", size = 775782, upload-time = "2026-01-14T23:15:29.352Z" }, + { url = "https://files.pythonhosted.org/packages/ca/b5/75f9a9ee4b03a7c009fe60500fe550b45df94f0955ca29af16333ef557c5/regex-2026.1.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:c9c08c2fbc6120e70abff5d7f28ffb4d969e14294fb2143b4b5c7d20e46d1714", size = 787978, upload-time = "2026-01-14T23:15:31.295Z" }, + { url = "https://files.pythonhosted.org/packages/72/b3/79821c826245bbe9ccbb54f6eadb7879c722fd3e0248c17bfc90bf54e123/regex-2026.1.15-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7ef7d5d4bd49ec7364315167a4134a015f61e8266c6d446fc116a9ac4456e10d", size = 858550, upload-time = "2026-01-14T23:15:33.558Z" }, + { url = "https://files.pythonhosted.org/packages/4a/85/2ab5f77a1c465745bfbfcb3ad63178a58337ae8d5274315e2cc623a822fa/regex-2026.1.15-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:6e42844ad64194fa08d5ccb75fe6a459b9b08e6d7296bd704460168d58a388f3", size = 763747, upload-time = "2026-01-14T23:15:35.206Z" }, + { url = "https://files.pythonhosted.org/packages/6d/84/c27df502d4bfe2873a3e3a7cf1bdb2b9cc10284d1a44797cf38bed790470/regex-2026.1.15-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cfecdaa4b19f9ca534746eb3b55a5195d5c95b88cac32a205e981ec0a22b7d31", size = 850615, upload-time = "2026-01-14T23:15:37.523Z" }, + { url = "https://files.pythonhosted.org/packages/7d/b7/658a9782fb253680aa8ecb5ccbb51f69e088ed48142c46d9f0c99b46c575/regex-2026.1.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:08df9722d9b87834a3d701f3fca570b2be115654dbfd30179f30ab2f39d606d3", size = 789951, upload-time = "2026-01-14T23:15:39.582Z" }, + { url = "https://files.pythonhosted.org/packages/fc/2a/5928af114441e059f15b2f63e188bd00c6529b3051c974ade7444b85fcda/regex-2026.1.15-cp313-cp313-win32.whl", hash = "sha256:d426616dae0967ca225ab12c22274eb816558f2f99ccb4a1d52ca92e8baf180f", size = 266275, upload-time = "2026-01-14T23:15:42.108Z" }, + { url = "https://files.pythonhosted.org/packages/4f/16/5bfbb89e435897bff28cf0352a992ca719d9e55ebf8b629203c96b6ce4f7/regex-2026.1.15-cp313-cp313-win_amd64.whl", hash = "sha256:febd38857b09867d3ed3f4f1af7d241c5c50362e25ef43034995b77a50df494e", size = 277145, upload-time = "2026-01-14T23:15:44.244Z" }, + { 
url = "https://files.pythonhosted.org/packages/56/c1/a09ff7392ef4233296e821aec5f78c51be5e91ffde0d163059e50fd75835/regex-2026.1.15-cp313-cp313-win_arm64.whl", hash = "sha256:8e32f7896f83774f91499d239e24cebfadbc07639c1494bb7213983842348337", size = 270411, upload-time = "2026-01-14T23:15:45.858Z" }, + { url = "https://files.pythonhosted.org/packages/3c/38/0cfd5a78e5c6db00e6782fdae70458f89850ce95baa5e8694ab91d89744f/regex-2026.1.15-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:ec94c04149b6a7b8120f9f44565722c7ae31b7a6d2275569d2eefa76b83da3be", size = 492068, upload-time = "2026-01-14T23:15:47.616Z" }, + { url = "https://files.pythonhosted.org/packages/50/72/6c86acff16cb7c959c4355826bbf06aad670682d07c8f3998d9ef4fee7cd/regex-2026.1.15-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:40c86d8046915bb9aeb15d3f3f15b6fd500b8ea4485b30e1bbc799dab3fe29f8", size = 292756, upload-time = "2026-01-14T23:15:49.307Z" }, + { url = "https://files.pythonhosted.org/packages/4e/58/df7fb69eadfe76526ddfce28abdc0af09ffe65f20c2c90932e89d705153f/regex-2026.1.15-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:726ea4e727aba21643205edad8f2187ec682d3305d790f73b7a51c7587b64bdd", size = 291114, upload-time = "2026-01-14T23:15:51.484Z" }, + { url = "https://files.pythonhosted.org/packages/ed/6c/a4011cd1cf96b90d2cdc7e156f91efbd26531e822a7fbb82a43c1016678e/regex-2026.1.15-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1cb740d044aff31898804e7bf1181cc72c03d11dfd19932b9911ffc19a79070a", size = 807524, upload-time = "2026-01-14T23:15:53.102Z" }, + { url = "https://files.pythonhosted.org/packages/1d/25/a53ffb73183f69c3e9f4355c4922b76d2840aee160af6af5fac229b6201d/regex-2026.1.15-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:05d75a668e9ea16f832390d22131fe1e8acc8389a694c8febc3e340b0f810b93", size = 873455, upload-time = "2026-01-14T23:15:54.956Z" }, + { url = 
"https://files.pythonhosted.org/packages/66/0b/8b47fc2e8f97d9b4a851736f3890a5f786443aa8901061c55f24c955f45b/regex-2026.1.15-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d991483606f3dbec93287b9f35596f41aa2e92b7c2ebbb935b63f409e243c9af", size = 915007, upload-time = "2026-01-14T23:15:57.041Z" }, + { url = "https://files.pythonhosted.org/packages/c2/fa/97de0d681e6d26fabe71968dbee06dd52819e9a22fdce5dac7256c31ed84/regex-2026.1.15-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:194312a14819d3e44628a44ed6fea6898fdbecb0550089d84c403475138d0a09", size = 812794, upload-time = "2026-01-14T23:15:58.916Z" }, + { url = "https://files.pythonhosted.org/packages/22/38/e752f94e860d429654aa2b1c51880bff8dfe8f084268258adf9151cf1f53/regex-2026.1.15-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fe2fda4110a3d0bc163c2e0664be44657431440722c5c5315c65155cab92f9e5", size = 781159, upload-time = "2026-01-14T23:16:00.817Z" }, + { url = "https://files.pythonhosted.org/packages/e9/a7/d739ffaef33c378fc888302a018d7f81080393d96c476b058b8c64fd2b0d/regex-2026.1.15-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:124dc36c85d34ef2d9164da41a53c1c8c122cfb1f6e1ec377a1f27ee81deb794", size = 795558, upload-time = "2026-01-14T23:16:03.267Z" }, + { url = "https://files.pythonhosted.org/packages/3e/c4/542876f9a0ac576100fc73e9c75b779f5c31e3527576cfc9cb3009dcc58a/regex-2026.1.15-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:a1774cd1981cd212506a23a14dba7fdeaee259f5deba2df6229966d9911e767a", size = 868427, upload-time = "2026-01-14T23:16:05.646Z" }, + { url = "https://files.pythonhosted.org/packages/fc/0f/d5655bea5b22069e32ae85a947aa564912f23758e112cdb74212848a1a1b/regex-2026.1.15-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:b5f7d8d2867152cdb625e72a530d2ccb48a3d199159144cbdd63870882fb6f80", size = 769939, upload-time = "2026-01-14T23:16:07.542Z" }, + { 
url = "https://files.pythonhosted.org/packages/20/06/7e18a4fa9d326daeda46d471a44ef94201c46eaa26dbbb780b5d92cbfdda/regex-2026.1.15-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:492534a0ab925d1db998defc3c302dae3616a2fc3fe2e08db1472348f096ddf2", size = 854753, upload-time = "2026-01-14T23:16:10.395Z" }, + { url = "https://files.pythonhosted.org/packages/3b/67/dc8946ef3965e166f558ef3b47f492bc364e96a265eb4a2bb3ca765c8e46/regex-2026.1.15-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c661fc820cfb33e166bf2450d3dadbda47c8d8981898adb9b6fe24e5e582ba60", size = 799559, upload-time = "2026-01-14T23:16:12.347Z" }, + { url = "https://files.pythonhosted.org/packages/a5/61/1bba81ff6d50c86c65d9fd84ce9699dd106438ee4cdb105bf60374ee8412/regex-2026.1.15-cp313-cp313t-win32.whl", hash = "sha256:99ad739c3686085e614bf77a508e26954ff1b8f14da0e3765ff7abbf7799f952", size = 268879, upload-time = "2026-01-14T23:16:14.049Z" }, + { url = "https://files.pythonhosted.org/packages/e9/5e/cef7d4c5fb0ea3ac5c775fd37db5747f7378b29526cc83f572198924ff47/regex-2026.1.15-cp313-cp313t-win_amd64.whl", hash = "sha256:32655d17905e7ff8ba5c764c43cb124e34a9245e45b83c22e81041e1071aee10", size = 280317, upload-time = "2026-01-14T23:16:15.718Z" }, + { url = "https://files.pythonhosted.org/packages/b4/52/4317f7a5988544e34ab57b4bde0f04944c4786128c933fb09825924d3e82/regex-2026.1.15-cp313-cp313t-win_arm64.whl", hash = "sha256:b2a13dd6a95e95a489ca242319d18fc02e07ceb28fa9ad146385194d95b3c829", size = 271551, upload-time = "2026-01-14T23:16:17.533Z" }, + { url = "https://files.pythonhosted.org/packages/52/0a/47fa888ec7cbbc7d62c5f2a6a888878e76169170ead271a35239edd8f0e8/regex-2026.1.15-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:d920392a6b1f353f4aa54328c867fec3320fa50657e25f64abf17af054fc97ac", size = 489170, upload-time = "2026-01-14T23:16:19.835Z" }, + { url = 
"https://files.pythonhosted.org/packages/ac/c4/d000e9b7296c15737c9301708e9e7fbdea009f8e93541b6b43bdb8219646/regex-2026.1.15-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b5a28980a926fa810dbbed059547b02783952e2efd9c636412345232ddb87ff6", size = 291146, upload-time = "2026-01-14T23:16:21.541Z" }, + { url = "https://files.pythonhosted.org/packages/f9/b6/921cc61982e538682bdf3bdf5b2c6ab6b34368da1f8e98a6c1ddc503c9cf/regex-2026.1.15-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:621f73a07595d83f28952d7bd1e91e9d1ed7625fb7af0064d3516674ec93a2a2", size = 288986, upload-time = "2026-01-14T23:16:23.381Z" }, + { url = "https://files.pythonhosted.org/packages/ca/33/eb7383dde0bbc93f4fb9d03453aab97e18ad4024ac7e26cef8d1f0a2cff0/regex-2026.1.15-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3d7d92495f47567a9b1669c51fc8d6d809821849063d168121ef801bbc213846", size = 799098, upload-time = "2026-01-14T23:16:25.088Z" }, + { url = "https://files.pythonhosted.org/packages/27/56/b664dccae898fc8d8b4c23accd853f723bde0f026c747b6f6262b688029c/regex-2026.1.15-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8dd16fba2758db7a3780a051f245539c4451ca20910f5a5e6ea1c08d06d4a76b", size = 864980, upload-time = "2026-01-14T23:16:27.297Z" }, + { url = "https://files.pythonhosted.org/packages/16/40/0999e064a170eddd237bae9ccfcd8f28b3aa98a38bf727a086425542a4fc/regex-2026.1.15-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1e1808471fbe44c1a63e5f577a1d5f02fe5d66031dcbdf12f093ffc1305a858e", size = 911607, upload-time = "2026-01-14T23:16:29.235Z" }, + { url = "https://files.pythonhosted.org/packages/07/78/c77f644b68ab054e5a674fb4da40ff7bffb2c88df58afa82dbf86573092d/regex-2026.1.15-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0751a26ad39d4f2ade8fe16c59b2bf5cb19eb3d2cd543e709e583d559bd9efde", size = 
803358, upload-time = "2026-01-14T23:16:31.369Z" }, + { url = "https://files.pythonhosted.org/packages/27/31/d4292ea8566eaa551fafc07797961c5963cf5235c797cc2ae19b85dfd04d/regex-2026.1.15-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0f0c7684c7f9ca241344ff95a1de964f257a5251968484270e91c25a755532c5", size = 775833, upload-time = "2026-01-14T23:16:33.141Z" }, + { url = "https://files.pythonhosted.org/packages/ce/b2/cff3bf2fea4133aa6fb0d1e370b37544d18c8350a2fa118c7e11d1db0e14/regex-2026.1.15-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:74f45d170a21df41508cb67165456538425185baaf686281fa210d7e729abc34", size = 788045, upload-time = "2026-01-14T23:16:35.005Z" }, + { url = "https://files.pythonhosted.org/packages/8d/99/2cb9b69045372ec877b6f5124bda4eb4253bc58b8fe5848c973f752bc52c/regex-2026.1.15-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:f1862739a1ffb50615c0fde6bae6569b5efbe08d98e59ce009f68a336f64da75", size = 859374, upload-time = "2026-01-14T23:16:36.919Z" }, + { url = "https://files.pythonhosted.org/packages/09/16/710b0a5abe8e077b1729a562d2f297224ad079f3a66dce46844c193416c8/regex-2026.1.15-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:453078802f1b9e2b7303fb79222c054cb18e76f7bdc220f7530fdc85d319f99e", size = 763940, upload-time = "2026-01-14T23:16:38.685Z" }, + { url = "https://files.pythonhosted.org/packages/dd/d1/7585c8e744e40eb3d32f119191969b91de04c073fca98ec14299041f6e7e/regex-2026.1.15-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:a30a68e89e5a218b8b23a52292924c1f4b245cb0c68d1cce9aec9bbda6e2c160", size = 850112, upload-time = "2026-01-14T23:16:40.646Z" }, + { url = "https://files.pythonhosted.org/packages/af/d6/43e1dd85df86c49a347aa57c1f69d12c652c7b60e37ec162e3096194a278/regex-2026.1.15-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9479cae874c81bf610d72b85bb681a94c95722c127b55445285fb0e2c82db8e1", size = 789586, upload-time = "2026-01-14T23:16:42.799Z" }, + { url = 
"https://files.pythonhosted.org/packages/93/38/77142422f631e013f316aaae83234c629555729a9fbc952b8a63ac91462a/regex-2026.1.15-cp314-cp314-win32.whl", hash = "sha256:d639a750223132afbfb8f429c60d9d318aeba03281a5f1ab49f877456448dcf1", size = 271691, upload-time = "2026-01-14T23:16:44.671Z" }, + { url = "https://files.pythonhosted.org/packages/4a/a9/ab16b4649524ca9e05213c1cdbb7faa85cc2aa90a0230d2f796cbaf22736/regex-2026.1.15-cp314-cp314-win_amd64.whl", hash = "sha256:4161d87f85fa831e31469bfd82c186923070fc970b9de75339b68f0c75b51903", size = 280422, upload-time = "2026-01-14T23:16:46.607Z" }, + { url = "https://files.pythonhosted.org/packages/be/2a/20fd057bf3521cb4791f69f869635f73e0aaf2b9ad2d260f728144f9047c/regex-2026.1.15-cp314-cp314-win_arm64.whl", hash = "sha256:91c5036ebb62663a6b3999bdd2e559fd8456d17e2b485bf509784cd31a8b1705", size = 273467, upload-time = "2026-01-14T23:16:48.967Z" }, + { url = "https://files.pythonhosted.org/packages/ad/77/0b1e81857060b92b9cad239104c46507dd481b3ff1fa79f8e7f865aae38a/regex-2026.1.15-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ee6854c9000a10938c79238de2379bea30c82e4925a371711af45387df35cab8", size = 492073, upload-time = "2026-01-14T23:16:51.154Z" }, + { url = "https://files.pythonhosted.org/packages/70/f3/f8302b0c208b22c1e4f423147e1913fd475ddd6230565b299925353de644/regex-2026.1.15-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2c2b80399a422348ce5de4fe40c418d6299a0fa2803dd61dc0b1a2f28e280fcf", size = 292757, upload-time = "2026-01-14T23:16:53.08Z" }, + { url = "https://files.pythonhosted.org/packages/bf/f0/ef55de2460f3b4a6da9d9e7daacd0cb79d4ef75c64a2af316e68447f0df0/regex-2026.1.15-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:dca3582bca82596609959ac39e12b7dad98385b4fefccb1151b937383cec547d", size = 291122, upload-time = "2026-01-14T23:16:55.383Z" }, + { url = 
"https://files.pythonhosted.org/packages/cf/55/bb8ccbacabbc3a11d863ee62a9f18b160a83084ea95cdfc5d207bfc3dd75/regex-2026.1.15-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef71d476caa6692eea743ae5ea23cde3260677f70122c4d258ca952e5c2d4e84", size = 807761, upload-time = "2026-01-14T23:16:57.251Z" }, + { url = "https://files.pythonhosted.org/packages/8f/84/f75d937f17f81e55679a0509e86176e29caa7298c38bd1db7ce9c0bf6075/regex-2026.1.15-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c243da3436354f4af6c3058a3f81a97d47ea52c9bd874b52fd30274853a1d5df", size = 873538, upload-time = "2026-01-14T23:16:59.349Z" }, + { url = "https://files.pythonhosted.org/packages/b8/d9/0da86327df70349aa8d86390da91171bd3ca4f0e7c1d1d453a9c10344da3/regex-2026.1.15-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8355ad842a7c7e9e5e55653eade3b7d1885ba86f124dd8ab1f722f9be6627434", size = 915066, upload-time = "2026-01-14T23:17:01.607Z" }, + { url = "https://files.pythonhosted.org/packages/2a/5e/f660fb23fc77baa2a61aa1f1fe3a4eea2bbb8a286ddec148030672e18834/regex-2026.1.15-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f192a831d9575271a22d804ff1a5355355723f94f31d9eef25f0d45a152fdc1a", size = 812938, upload-time = "2026-01-14T23:17:04.366Z" }, + { url = "https://files.pythonhosted.org/packages/69/33/a47a29bfecebbbfd1e5cd3f26b28020a97e4820f1c5148e66e3b7d4b4992/regex-2026.1.15-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:166551807ec20d47ceaeec380081f843e88c8949780cd42c40f18d16168bed10", size = 781314, upload-time = "2026-01-14T23:17:06.378Z" }, + { url = "https://files.pythonhosted.org/packages/65/ec/7ec2bbfd4c3f4e494a24dec4c6943a668e2030426b1b8b949a6462d2c17b/regex-2026.1.15-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = 
"sha256:f9ca1cbdc0fbfe5e6e6f8221ef2309988db5bcede52443aeaee9a4ad555e0dac", size = 795652, upload-time = "2026-01-14T23:17:08.521Z" }, + { url = "https://files.pythonhosted.org/packages/46/79/a5d8651ae131fe27d7c521ad300aa7f1c7be1dbeee4d446498af5411b8a9/regex-2026.1.15-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:b30bcbd1e1221783c721483953d9e4f3ab9c5d165aa709693d3f3946747b1aea", size = 868550, upload-time = "2026-01-14T23:17:10.573Z" }, + { url = "https://files.pythonhosted.org/packages/06/b7/25635d2809664b79f183070786a5552dd4e627e5aedb0065f4e3cf8ee37d/regex-2026.1.15-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:2a8d7b50c34578d0d3bf7ad58cde9652b7d683691876f83aedc002862a35dc5e", size = 769981, upload-time = "2026-01-14T23:17:12.871Z" }, + { url = "https://files.pythonhosted.org/packages/16/8b/fc3fcbb2393dcfa4a6c5ffad92dc498e842df4581ea9d14309fcd3c55fb9/regex-2026.1.15-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:9d787e3310c6a6425eb346be4ff2ccf6eece63017916fd77fe8328c57be83521", size = 854780, upload-time = "2026-01-14T23:17:14.837Z" }, + { url = "https://files.pythonhosted.org/packages/d0/38/dde117c76c624713c8a2842530be9c93ca8b606c0f6102d86e8cd1ce8bea/regex-2026.1.15-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:619843841e220adca114118533a574a9cd183ed8a28b85627d2844c500a2b0db", size = 799778, upload-time = "2026-01-14T23:17:17.369Z" }, + { url = "https://files.pythonhosted.org/packages/e3/0d/3a6cfa9ae99606afb612d8fb7a66b245a9d5ff0f29bb347c8a30b6ad561b/regex-2026.1.15-cp314-cp314t-win32.whl", hash = "sha256:e90b8db97f6f2c97eb045b51a6b2c5ed69cedd8392459e0642d4199b94fabd7e", size = 274667, upload-time = "2026-01-14T23:17:19.301Z" }, + { url = "https://files.pythonhosted.org/packages/5b/b2/297293bb0742fd06b8d8e2572db41a855cdf1cae0bf009b1cb74fe07e196/regex-2026.1.15-cp314-cp314t-win_amd64.whl", hash = "sha256:5ef19071f4ac9f0834793af85bd04a920b4407715624e40cb7a0631a11137cdf", size = 284386, upload-time = "2026-01-14T23:17:21.231Z" }, 
+ { url = "https://files.pythonhosted.org/packages/95/e4/a3b9480c78cf8ee86626cb06f8d931d74d775897d44201ccb813097ae697/regex-2026.1.15-cp314-cp314t-win_arm64.whl", hash = "sha256:ca89c5e596fc05b015f27561b3793dc2fa0917ea0d7507eebb448efd35274a70", size = 274837, upload-time = "2026-01-14T23:17:23.146Z" }, ] [[package]] @@ -2503,15 +2525,15 @@ wheels = [ [[package]] name = "rich" -version = "14.2.0" +version = "14.3.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "markdown-it-py" }, { name = "pygments" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fb/d2/8920e102050a0de7bfabeb4c4614a49248cf8d5d7a8d01885fbb24dc767a/rich-14.2.0.tar.gz", hash = "sha256:73ff50c7c0c1c77c8243079283f4edb376f0f6442433aecb8ce7e6d0b92d1fe4", size = 219990, upload-time = "2025-10-09T14:16:53.064Z" } +sdist = { url = "https://files.pythonhosted.org/packages/74/99/a4cab2acbb884f80e558b0771e97e21e939c5dfb460f488d19df485e8298/rich-14.3.2.tar.gz", hash = "sha256:e712f11c1a562a11843306f5ed999475f09ac31ffb64281f73ab29ffdda8b3b8", size = 230143, upload-time = "2026-02-01T16:20:47.908Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/25/7a/b0178788f8dc6cafce37a212c99565fa1fe7872c70c6c9c1e1a372d9d88f/rich-14.2.0-py3-none-any.whl", hash = "sha256:76bc51fe2e57d2b1be1f96c524b890b816e334ab4c1e45888799bfaab0021edd", size = 243393, upload-time = "2025-10-09T14:16:51.245Z" }, + { url = "https://files.pythonhosted.org/packages/ef/45/615f5babd880b4bd7d405cc0dc348234c5ffb6ed1ea33e152ede08b2072d/rich-14.3.2-py3-none-any.whl", hash = "sha256:08e67c3e90884651da3239ea668222d19bea7b589149d8014a21c633420dbb69", size = 309963, upload-time = "2026-02-01T16:20:46.078Z" }, ] [[package]] @@ -2636,28 +2658,28 @@ wheels = [ [[package]] name = "ruff" -version = "0.14.7" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b7/5b/dd7406afa6c95e3d8fa9d652b6d6dd17dd4a6bf63cb477014e8ccd3dcd46/ruff-0.14.7.tar.gz", hash 
= "sha256:3417deb75d23bd14a722b57b0a1435561db65f0ad97435b4cf9f85ffcef34ae5", size = 5727324, upload-time = "2025-11-28T20:55:10.525Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8c/b1/7ea5647aaf90106f6d102230e5df874613da43d1089864da1553b899ba5e/ruff-0.14.7-py3-none-linux_armv6l.whl", hash = "sha256:b9d5cb5a176c7236892ad7224bc1e63902e4842c460a0b5210701b13e3de4fca", size = 13414475, upload-time = "2025-11-28T20:54:54.569Z" }, - { url = "https://files.pythonhosted.org/packages/af/19/fddb4cd532299db9cdaf0efdc20f5c573ce9952a11cb532d3b859d6d9871/ruff-0.14.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:3f64fe375aefaf36ca7d7250292141e39b4cea8250427482ae779a2aa5d90015", size = 13634613, upload-time = "2025-11-28T20:55:17.54Z" }, - { url = "https://files.pythonhosted.org/packages/40/2b/469a66e821d4f3de0440676ed3e04b8e2a1dc7575cf6fa3ba6d55e3c8557/ruff-0.14.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:93e83bd3a9e1a3bda64cb771c0d47cda0e0d148165013ae2d3554d718632d554", size = 12765458, upload-time = "2025-11-28T20:55:26.128Z" }, - { url = "https://files.pythonhosted.org/packages/f1/05/0b001f734fe550bcfde4ce845948ac620ff908ab7241a39a1b39bb3c5f49/ruff-0.14.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3838948e3facc59a6070795de2ae16e5786861850f78d5914a03f12659e88f94", size = 13236412, upload-time = "2025-11-28T20:55:28.602Z" }, - { url = "https://files.pythonhosted.org/packages/11/36/8ed15d243f011b4e5da75cd56d6131c6766f55334d14ba31cce5461f28aa/ruff-0.14.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:24c8487194d38b6d71cd0fd17a5b6715cda29f59baca1defe1e3a03240f851d1", size = 13182949, upload-time = "2025-11-28T20:55:33.265Z" }, - { url = "https://files.pythonhosted.org/packages/3b/cf/fcb0b5a195455729834f2a6eadfe2e4519d8ca08c74f6d2b564a4f18f553/ruff-0.14.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:79c73db6833f058a4be8ffe4a0913b6d4ad41f6324745179bd2aa09275b01d0b", size = 
13816470, upload-time = "2025-11-28T20:55:08.203Z" }, - { url = "https://files.pythonhosted.org/packages/7f/5d/34a4748577ff7a5ed2f2471456740f02e86d1568a18c9faccfc73bd9ca3f/ruff-0.14.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:12eb7014fccff10fc62d15c79d8a6be4d0c2d60fe3f8e4d169a0d2def75f5dad", size = 15289621, upload-time = "2025-11-28T20:55:30.837Z" }, - { url = "https://files.pythonhosted.org/packages/53/53/0a9385f047a858ba133d96f3f8e3c9c66a31cc7c4b445368ef88ebeac209/ruff-0.14.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c623bbdc902de7ff715a93fa3bb377a4e42dd696937bf95669118773dbf0c50", size = 14975817, upload-time = "2025-11-28T20:55:24.107Z" }, - { url = "https://files.pythonhosted.org/packages/a8/d7/2f1c32af54c3b46e7fadbf8006d8b9bcfbea535c316b0bd8813d6fb25e5d/ruff-0.14.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f53accc02ed2d200fa621593cdb3c1ae06aa9b2c3cae70bc96f72f0000ae97a9", size = 14284549, upload-time = "2025-11-28T20:55:06.08Z" }, - { url = "https://files.pythonhosted.org/packages/92/05/434ddd86becd64629c25fb6b4ce7637dd52a45cc4a4415a3008fe61c27b9/ruff-0.14.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:281f0e61a23fcdcffca210591f0f53aafaa15f9025b5b3f9706879aaa8683bc4", size = 14071389, upload-time = "2025-11-28T20:55:35.617Z" }, - { url = "https://files.pythonhosted.org/packages/ff/50/fdf89d4d80f7f9d4f420d26089a79b3bb1538fe44586b148451bc2ba8d9c/ruff-0.14.7-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:dbbaa5e14148965b91cb090236931182ee522a5fac9bc5575bafc5c07b9f9682", size = 14202679, upload-time = "2025-11-28T20:55:01.472Z" }, - { url = "https://files.pythonhosted.org/packages/77/54/87b34988984555425ce967f08a36df0ebd339bb5d9d0e92a47e41151eafc/ruff-0.14.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1464b6e54880c0fe2f2d6eaefb6db15373331414eddf89d6b903767ae2458143", size = 13147677, upload-time = "2025-11-28T20:55:19.933Z" }, - { 
url = "https://files.pythonhosted.org/packages/67/29/f55e4d44edfe053918a16a3299e758e1c18eef216b7a7092550d7a9ec51c/ruff-0.14.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:f217ed871e4621ea6128460df57b19ce0580606c23aeab50f5de425d05226784", size = 13151392, upload-time = "2025-11-28T20:55:21.967Z" }, - { url = "https://files.pythonhosted.org/packages/36/69/47aae6dbd4f1d9b4f7085f4d9dcc84e04561ee7ad067bf52e0f9b02e3209/ruff-0.14.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6be02e849440ed3602d2eb478ff7ff07d53e3758f7948a2a598829660988619e", size = 13412230, upload-time = "2025-11-28T20:55:12.749Z" }, - { url = "https://files.pythonhosted.org/packages/b7/4b/6e96cb6ba297f2ba502a231cd732ed7c3de98b1a896671b932a5eefa3804/ruff-0.14.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:19a0f116ee5e2b468dfe80c41c84e2bbd6b74f7b719bee86c2ecde0a34563bcc", size = 14195397, upload-time = "2025-11-28T20:54:56.896Z" }, - { url = "https://files.pythonhosted.org/packages/69/82/251d5f1aa4dcad30aed491b4657cecd9fb4274214da6960ffec144c260f7/ruff-0.14.7-py3-none-win32.whl", hash = "sha256:e33052c9199b347c8937937163b9b149ef6ab2e4bb37b042e593da2e6f6cccfa", size = 13126751, upload-time = "2025-11-28T20:55:03.47Z" }, - { url = "https://files.pythonhosted.org/packages/a8/b5/d0b7d145963136b564806f6584647af45ab98946660d399ec4da79cae036/ruff-0.14.7-py3-none-win_amd64.whl", hash = "sha256:e17a20ad0d3fad47a326d773a042b924d3ac31c6ca6deb6c72e9e6b5f661a7c6", size = 14531726, upload-time = "2025-11-28T20:54:59.121Z" }, - { url = "https://files.pythonhosted.org/packages/1d/d2/1637f4360ada6a368d3265bf39f2cf737a0aaab15ab520fc005903e883f8/ruff-0.14.7-py3-none-win_arm64.whl", hash = "sha256:be4d653d3bea1b19742fcc6502354e32f65cd61ff2fbdb365803ef2c2aec6228", size = 13609215, upload-time = "2025-11-28T20:55:15.375Z" }, +version = "0.14.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/2e/06/f71e3a86b2df0dfa2d2f72195941cd09b44f87711cb7fa5193732cb9a5fc/ruff-0.14.14.tar.gz", hash = "sha256:2d0f819c9a90205f3a867dbbd0be083bee9912e170fd7d9704cc8ae45824896b", size = 4515732, upload-time = "2026-01-22T22:30:17.527Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/89/20a12e97bc6b9f9f68343952da08a8099c57237aef953a56b82711d55edd/ruff-0.14.14-py3-none-linux_armv6l.whl", hash = "sha256:7cfe36b56e8489dee8fbc777c61959f60ec0f1f11817e8f2415f429552846aed", size = 10467650, upload-time = "2026-01-22T22:30:08.578Z" }, + { url = "https://files.pythonhosted.org/packages/a3/b1/c5de3fd2d5a831fcae21beda5e3589c0ba67eec8202e992388e4b17a6040/ruff-0.14.14-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6006a0082336e7920b9573ef8a7f52eec837add1265cc74e04ea8a4368cd704c", size = 10883245, upload-time = "2026-01-22T22:30:04.155Z" }, + { url = "https://files.pythonhosted.org/packages/b8/7c/3c1db59a10e7490f8f6f8559d1db8636cbb13dccebf18686f4e3c9d7c772/ruff-0.14.14-py3-none-macosx_11_0_arm64.whl", hash = "sha256:026c1d25996818f0bf498636686199d9bd0d9d6341c9c2c3b62e2a0198b758de", size = 10231273, upload-time = "2026-01-22T22:30:34.642Z" }, + { url = "https://files.pythonhosted.org/packages/a1/6e/5e0e0d9674be0f8581d1f5e0f0a04761203affce3232c1a1189d0e3b4dad/ruff-0.14.14-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f666445819d31210b71e0a6d1c01e24447a20b85458eea25a25fe8142210ae0e", size = 10585753, upload-time = "2026-01-22T22:30:31.781Z" }, + { url = "https://files.pythonhosted.org/packages/23/09/754ab09f46ff1884d422dc26d59ba18b4e5d355be147721bb2518aa2a014/ruff-0.14.14-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3c0f18b922c6d2ff9a5e6c3ee16259adc513ca775bcf82c67ebab7cbd9da5bc8", size = 10286052, upload-time = "2026-01-22T22:30:24.827Z" }, + { url = 
"https://files.pythonhosted.org/packages/c8/cc/e71f88dd2a12afb5f50733851729d6b571a7c3a35bfdb16c3035132675a0/ruff-0.14.14-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1629e67489c2dea43e8658c3dba659edbfd87361624b4040d1df04c9740ae906", size = 11043637, upload-time = "2026-01-22T22:30:13.239Z" }, + { url = "https://files.pythonhosted.org/packages/67/b2/397245026352494497dac935d7f00f1468c03a23a0c5db6ad8fc49ca3fb2/ruff-0.14.14-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:27493a2131ea0f899057d49d303e4292b2cae2bb57253c1ed1f256fbcd1da480", size = 12194761, upload-time = "2026-01-22T22:30:22.542Z" }, + { url = "https://files.pythonhosted.org/packages/5b/06/06ef271459f778323112c51b7587ce85230785cd64e91772034ddb88f200/ruff-0.14.14-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01ff589aab3f5b539e35db38425da31a57521efd1e4ad1ae08fc34dbe30bd7df", size = 12005701, upload-time = "2026-01-22T22:30:20.499Z" }, + { url = "https://files.pythonhosted.org/packages/41/d6/99364514541cf811ccc5ac44362f88df66373e9fec1b9d1c4cc830593fe7/ruff-0.14.14-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1cc12d74eef0f29f51775f5b755913eb523546b88e2d733e1d701fe65144e89b", size = 11282455, upload-time = "2026-01-22T22:29:59.679Z" }, + { url = "https://files.pythonhosted.org/packages/ca/71/37daa46f89475f8582b7762ecd2722492df26421714a33e72ccc9a84d7a5/ruff-0.14.14-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb8481604b7a9e75eff53772496201690ce2687067e038b3cc31aaf16aa0b974", size = 11215882, upload-time = "2026-01-22T22:29:57.032Z" }, + { url = "https://files.pythonhosted.org/packages/2c/10/a31f86169ec91c0705e618443ee74ede0bdd94da0a57b28e72db68b2dbac/ruff-0.14.14-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:14649acb1cf7b5d2d283ebd2f58d56b75836ed8c6f329664fa91cdea19e76e66", size = 11180549, upload-time = "2026-01-22T22:30:27.175Z" }, + { url = 
"https://files.pythonhosted.org/packages/fd/1e/c723f20536b5163adf79bdd10c5f093414293cdf567eed9bdb7b83940f3f/ruff-0.14.14-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e8058d2145566510790eab4e2fad186002e288dec5e0d343a92fe7b0bc1b3e13", size = 10543416, upload-time = "2026-01-22T22:30:01.964Z" }, + { url = "https://files.pythonhosted.org/packages/3e/34/8a84cea7e42c2d94ba5bde1d7a4fae164d6318f13f933d92da6d7c2041ff/ruff-0.14.14-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:e651e977a79e4c758eb807f0481d673a67ffe53cfa92209781dfa3a996cf8412", size = 10285491, upload-time = "2026-01-22T22:30:29.51Z" }, + { url = "https://files.pythonhosted.org/packages/55/ef/b7c5ea0be82518906c978e365e56a77f8de7678c8bb6651ccfbdc178c29f/ruff-0.14.14-py3-none-musllinux_1_2_i686.whl", hash = "sha256:cc8b22da8d9d6fdd844a68ae937e2a0adf9b16514e9a97cc60355e2d4b219fc3", size = 10733525, upload-time = "2026-01-22T22:30:06.499Z" }, + { url = "https://files.pythonhosted.org/packages/6a/5b/aaf1dfbcc53a2811f6cc0a1759de24e4b03e02ba8762daabd9b6bd8c59e3/ruff-0.14.14-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:16bc890fb4cc9781bb05beb5ab4cd51be9e7cb376bf1dd3580512b24eb3fda2b", size = 11315626, upload-time = "2026-01-22T22:30:36.848Z" }, + { url = "https://files.pythonhosted.org/packages/2c/aa/9f89c719c467dfaf8ad799b9bae0df494513fb21d31a6059cb5870e57e74/ruff-0.14.14-py3-none-win32.whl", hash = "sha256:b530c191970b143375b6a68e6f743800b2b786bbcf03a7965b06c4bf04568167", size = 10502442, upload-time = "2026-01-22T22:30:38.93Z" }, + { url = "https://files.pythonhosted.org/packages/87/44/90fa543014c45560cae1fffc63ea059fb3575ee6e1cb654562197e5d16fb/ruff-0.14.14-py3-none-win_amd64.whl", hash = "sha256:3dde1435e6b6fe5b66506c1dff67a421d0b7f6488d466f651c07f4cab3bf20fd", size = 11630486, upload-time = "2026-01-22T22:30:10.852Z" }, + { url = "https://files.pythonhosted.org/packages/9e/6a/40fee331a52339926a92e17ae748827270b288a35ef4a15c9c8f2ec54715/ruff-0.14.14-py3-none-win_arm64.whl", hash = 
"sha256:56e6981a98b13a32236a72a8da421d7839221fa308b223b9283312312e5ac76c", size = 10920448, upload-time = "2026-01-22T22:30:15.417Z" }, ] [[package]] @@ -2796,93 +2818,99 @@ wheels = [ [[package]] name = "tokenizers" -version = "0.22.1" +version = "0.22.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "huggingface-hub" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1c/46/fb6854cec3278fbfa4a75b50232c77622bc517ac886156e6afbfa4d8fc6e/tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9", size = 363123, upload-time = "2025-09-19T09:49:23.424Z" } +sdist = { url = "https://files.pythonhosted.org/packages/73/6f/f80cfef4a312e1fb34baf7d85c72d4411afde10978d4657f8cdd811d3ccc/tokenizers-0.22.2.tar.gz", hash = "sha256:473b83b915e547aa366d1eee11806deaf419e17be16310ac0a14077f1e28f917", size = 372115, upload-time = "2026-01-05T10:45:15.988Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/33/f4b2d94ada7ab297328fc671fed209368ddb82f965ec2224eb1892674c3a/tokenizers-0.22.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:59fdb013df17455e5f950b4b834a7b3ee2e0271e6378ccb33aa74d178b513c73", size = 3069318, upload-time = "2025-09-19T09:49:11.848Z" }, - { url = "https://files.pythonhosted.org/packages/1c/58/2aa8c874d02b974990e89ff95826a4852a8b2a273c7d1b4411cdd45a4565/tokenizers-0.22.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:8d4e484f7b0827021ac5f9f71d4794aaef62b979ab7608593da22b1d2e3c4edc", size = 2926478, upload-time = "2025-09-19T09:49:09.759Z" }, - { url = "https://files.pythonhosted.org/packages/1e/3b/55e64befa1e7bfea963cf4b787b2cea1011362c4193f5477047532ce127e/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19d2962dd28bc67c1f205ab180578a78eef89ac60ca7ef7cbe9635a46a56422a", size = 3256994, upload-time = "2025-09-19T09:48:56.701Z" }, - { url = 
"https://files.pythonhosted.org/packages/71/0b/fbfecf42f67d9b7b80fde4aabb2b3110a97fac6585c9470b5bff103a80cb/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:38201f15cdb1f8a6843e6563e6e79f4abd053394992b9bbdf5213ea3469b4ae7", size = 3153141, upload-time = "2025-09-19T09:48:59.749Z" }, - { url = "https://files.pythonhosted.org/packages/17/a9/b38f4e74e0817af8f8ef925507c63c6ae8171e3c4cb2d5d4624bf58fca69/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1cbe5454c9a15df1b3443c726063d930c16f047a3cc724b9e6e1a91140e5a21", size = 3508049, upload-time = "2025-09-19T09:49:05.868Z" }, - { url = "https://files.pythonhosted.org/packages/d2/48/dd2b3dac46bb9134a88e35d72e1aa4869579eacc1a27238f1577270773ff/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7d094ae6312d69cc2a872b54b91b309f4f6fbce871ef28eb27b52a98e4d0214", size = 3710730, upload-time = "2025-09-19T09:49:01.832Z" }, - { url = "https://files.pythonhosted.org/packages/93/0e/ccabc8d16ae4ba84a55d41345207c1e2ea88784651a5a487547d80851398/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afd7594a56656ace95cdd6df4cca2e4059d294c5cfb1679c57824b605556cb2f", size = 3412560, upload-time = "2025-09-19T09:49:03.867Z" }, - { url = "https://files.pythonhosted.org/packages/d0/c6/dc3a0db5a6766416c32c034286d7c2d406da1f498e4de04ab1b8959edd00/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2ef6063d7a84994129732b47e7915e8710f27f99f3a3260b8a38fc7ccd083f4", size = 3250221, upload-time = "2025-09-19T09:49:07.664Z" }, - { url = "https://files.pythonhosted.org/packages/d7/a6/2c8486eef79671601ff57b093889a345dd3d576713ef047776015dc66de7/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ba0a64f450b9ef412c98f6bcd2a50c6df6e2443b560024a09fa6a03189726879", size = 9345569, upload-time = "2025-09-19T09:49:14.214Z" }, - { url = 
"https://files.pythonhosted.org/packages/6b/16/32ce667f14c35537f5f605fe9bea3e415ea1b0a646389d2295ec348d5657/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:331d6d149fa9c7d632cde4490fb8bbb12337fa3a0232e77892be656464f4b446", size = 9271599, upload-time = "2025-09-19T09:49:16.639Z" }, - { url = "https://files.pythonhosted.org/packages/51/7c/a5f7898a3f6baa3fc2685c705e04c98c1094c523051c805cdd9306b8f87e/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:607989f2ea68a46cb1dfbaf3e3aabdf3f21d8748312dbeb6263d1b3b66c5010a", size = 9533862, upload-time = "2025-09-19T09:49:19.146Z" }, - { url = "https://files.pythonhosted.org/packages/36/65/7e75caea90bc73c1dd8d40438adf1a7bc26af3b8d0a6705ea190462506e1/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a0f307d490295717726598ef6fa4f24af9d484809223bbc253b201c740a06390", size = 9681250, upload-time = "2025-09-19T09:49:21.501Z" }, - { url = "https://files.pythonhosted.org/packages/30/2c/959dddef581b46e6209da82df3b78471e96260e2bc463f89d23b1bf0e52a/tokenizers-0.22.1-cp39-abi3-win32.whl", hash = "sha256:b5120eed1442765cd90b903bb6cfef781fd8fe64e34ccaecbae4c619b7b12a82", size = 2472003, upload-time = "2025-09-19T09:49:27.089Z" }, - { url = "https://files.pythonhosted.org/packages/b3/46/e33a8c93907b631a99377ef4c5f817ab453d0b34f93529421f42ff559671/tokenizers-0.22.1-cp39-abi3-win_amd64.whl", hash = "sha256:65fd6e3fb11ca1e78a6a93602490f134d1fdeb13bcef99389d5102ea318ed138", size = 2674684, upload-time = "2025-09-19T09:49:24.953Z" }, + { url = "https://files.pythonhosted.org/packages/92/97/5dbfabf04c7e348e655e907ed27913e03db0923abb5dfdd120d7b25630e1/tokenizers-0.22.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:544dd704ae7238755d790de45ba8da072e9af3eea688f698b137915ae959281c", size = 3100275, upload-time = "2026-01-05T10:41:02.158Z" }, + { url = 
"https://files.pythonhosted.org/packages/2e/47/174dca0502ef88b28f1c9e06b73ce33500eedfac7a7692108aec220464e7/tokenizers-0.22.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1e418a55456beedca4621dbab65a318981467a2b188e982a23e117f115ce5001", size = 2981472, upload-time = "2026-01-05T10:41:00.276Z" }, + { url = "https://files.pythonhosted.org/packages/d6/84/7990e799f1309a8b87af6b948f31edaa12a3ed22d11b352eaf4f4b2e5753/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249487018adec45d6e3554c71d46eb39fa8ea67156c640f7513eb26f318cec7", size = 3290736, upload-time = "2026-01-05T10:40:32.165Z" }, + { url = "https://files.pythonhosted.org/packages/78/59/09d0d9ba94dcd5f4f1368d4858d24546b4bdc0231c2354aa31d6199f0399/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25b85325d0815e86e0bac263506dd114578953b7b53d7de09a6485e4a160a7dd", size = 3168835, upload-time = "2026-01-05T10:40:38.847Z" }, + { url = "https://files.pythonhosted.org/packages/47/50/b3ebb4243e7160bda8d34b731e54dd8ab8b133e50775872e7a434e524c28/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfb88f22a209ff7b40a576d5324bf8286b519d7358663db21d6246fb17eea2d5", size = 3521673, upload-time = "2026-01-05T10:40:56.614Z" }, + { url = "https://files.pythonhosted.org/packages/e0/fa/89f4cb9e08df770b57adb96f8cbb7e22695a4cb6c2bd5f0c4f0ebcf33b66/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c774b1276f71e1ef716e5486f21e76333464f47bece56bbd554485982a9e03e", size = 3724818, upload-time = "2026-01-05T10:40:44.507Z" }, + { url = "https://files.pythonhosted.org/packages/64/04/ca2363f0bfbe3b3d36e95bf67e56a4c88c8e3362b658e616d1ac185d47f2/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df6c4265b289083bf710dff49bc51ef252f9d5be33a45ee2bed151114a56207b", size = 3379195, upload-time = "2026-01-05T10:40:51.139Z" }, + { url = 
"https://files.pythonhosted.org/packages/2e/76/932be4b50ef6ccedf9d3c6639b056a967a86258c6d9200643f01269211ca/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:369cc9fc8cc10cb24143873a0d95438bb8ee257bb80c71989e3ee290e8d72c67", size = 3274982, upload-time = "2026-01-05T10:40:58.331Z" }, + { url = "https://files.pythonhosted.org/packages/1d/28/5f9f5a4cc211b69e89420980e483831bcc29dade307955cc9dc858a40f01/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:29c30b83d8dcd061078b05ae0cb94d3c710555fbb44861139f9f83dcca3dc3e4", size = 9478245, upload-time = "2026-01-05T10:41:04.053Z" }, + { url = "https://files.pythonhosted.org/packages/6c/fb/66e2da4704d6aadebf8cb39f1d6d1957df667ab24cff2326b77cda0dcb85/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:37ae80a28c1d3265bb1f22464c856bd23c02a05bb211e56d0c5301a435be6c1a", size = 9560069, upload-time = "2026-01-05T10:45:10.673Z" }, + { url = "https://files.pythonhosted.org/packages/16/04/fed398b05caa87ce9b1a1bb5166645e38196081b225059a6edaff6440fac/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:791135ee325f2336f498590eb2f11dc5c295232f288e75c99a36c5dbce63088a", size = 9899263, upload-time = "2026-01-05T10:45:12.559Z" }, + { url = "https://files.pythonhosted.org/packages/05/a1/d62dfe7376beaaf1394917e0f8e93ee5f67fea8fcf4107501db35996586b/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38337540fbbddff8e999d59970f3c6f35a82de10053206a7562f1ea02d046fa5", size = 10033429, upload-time = "2026-01-05T10:45:14.333Z" }, + { url = "https://files.pythonhosted.org/packages/fd/18/a545c4ea42af3df6effd7d13d250ba77a0a86fb20393143bbb9a92e434d4/tokenizers-0.22.2-cp39-abi3-win32.whl", hash = "sha256:a6bf3f88c554a2b653af81f3204491c818ae2ac6fbc09e76ef4773351292bc92", size = 2502363, upload-time = "2026-01-05T10:45:20.593Z" }, + { url = 
"https://files.pythonhosted.org/packages/65/71/0670843133a43d43070abeb1949abfdef12a86d490bea9cd9e18e37c5ff7/tokenizers-0.22.2-cp39-abi3-win_amd64.whl", hash = "sha256:c9ea31edff2968b44a88f97d784c2f16dc0729b8b143ed004699ebca91f05c48", size = 2747786, upload-time = "2026-01-05T10:45:18.411Z" }, + { url = "https://files.pythonhosted.org/packages/72/f4/0de46cfa12cdcbcd464cc59fde36912af405696f687e53a091fb432f694c/tokenizers-0.22.2-cp39-abi3-win_arm64.whl", hash = "sha256:9ce725d22864a1e965217204946f830c37876eee3b2ba6fc6255e8e903d5fcbc", size = 2612133, upload-time = "2026-01-05T10:45:17.232Z" }, ] [[package]] name = "tomli" -version = "2.3.0" +version = "2.4.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/52/ed/3f73f72945444548f33eba9a87fc7a6e969915e7b1acc8260b30e1f76a2f/tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549", size = 17392, upload-time = "2025-10-08T22:01:47.119Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b3/2e/299f62b401438d5fe1624119c723f5d877acc86a4c2492da405626665f12/tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45", size = 153236, upload-time = "2025-10-08T22:01:00.137Z" }, - { url = "https://files.pythonhosted.org/packages/86/7f/d8fffe6a7aefdb61bced88fcb5e280cfd71e08939da5894161bd71bea022/tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba", size = 148084, upload-time = "2025-10-08T22:01:01.63Z" }, - { url = "https://files.pythonhosted.org/packages/47/5c/24935fb6a2ee63e86d80e4d3b58b222dafaf438c416752c8b58537c8b89a/tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf", size = 234832, upload-time = "2025-10-08T22:01:02.543Z" }, - { url = 
"https://files.pythonhosted.org/packages/89/da/75dfd804fc11e6612846758a23f13271b76d577e299592b4371a4ca4cd09/tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441", size = 242052, upload-time = "2025-10-08T22:01:03.836Z" }, - { url = "https://files.pythonhosted.org/packages/70/8c/f48ac899f7b3ca7eb13af73bacbc93aec37f9c954df3c08ad96991c8c373/tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845", size = 239555, upload-time = "2025-10-08T22:01:04.834Z" }, - { url = "https://files.pythonhosted.org/packages/ba/28/72f8afd73f1d0e7829bfc093f4cb98ce0a40ffc0cc997009ee1ed94ba705/tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c", size = 245128, upload-time = "2025-10-08T22:01:05.84Z" }, - { url = "https://files.pythonhosted.org/packages/b6/eb/a7679c8ac85208706d27436e8d421dfa39d4c914dcf5fa8083a9305f58d9/tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456", size = 96445, upload-time = "2025-10-08T22:01:06.896Z" }, - { url = "https://files.pythonhosted.org/packages/0a/fe/3d3420c4cb1ad9cb462fb52967080575f15898da97e21cb6f1361d505383/tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be", size = 107165, upload-time = "2025-10-08T22:01:08.107Z" }, - { url = "https://files.pythonhosted.org/packages/ff/b7/40f36368fcabc518bb11c8f06379a0fd631985046c038aca08c6d6a43c6e/tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac", size = 154891, upload-time = "2025-10-08T22:01:09.082Z" }, - { url = 
"https://files.pythonhosted.org/packages/f9/3f/d9dd692199e3b3aab2e4e4dd948abd0f790d9ded8cd10cbaae276a898434/tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22", size = 148796, upload-time = "2025-10-08T22:01:10.266Z" }, - { url = "https://files.pythonhosted.org/packages/60/83/59bff4996c2cf9f9387a0f5a3394629c7efa5ef16142076a23a90f1955fa/tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f", size = 242121, upload-time = "2025-10-08T22:01:11.332Z" }, - { url = "https://files.pythonhosted.org/packages/45/e5/7c5119ff39de8693d6baab6c0b6dcb556d192c165596e9fc231ea1052041/tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52", size = 250070, upload-time = "2025-10-08T22:01:12.498Z" }, - { url = "https://files.pythonhosted.org/packages/45/12/ad5126d3a278f27e6701abde51d342aa78d06e27ce2bb596a01f7709a5a2/tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8", size = 245859, upload-time = "2025-10-08T22:01:13.551Z" }, - { url = "https://files.pythonhosted.org/packages/fb/a1/4d6865da6a71c603cfe6ad0e6556c73c76548557a8d658f9e3b142df245f/tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6", size = 250296, upload-time = "2025-10-08T22:01:14.614Z" }, - { url = "https://files.pythonhosted.org/packages/a0/b7/a7a7042715d55c9ba6e8b196d65d2cb662578b4d8cd17d882d45322b0d78/tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876", size = 97124, upload-time = "2025-10-08T22:01:15.629Z" }, - { url = 
"https://files.pythonhosted.org/packages/06/1e/f22f100db15a68b520664eb3328fb0ae4e90530887928558112c8d1f4515/tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878", size = 107698, upload-time = "2025-10-08T22:01:16.51Z" }, - { url = "https://files.pythonhosted.org/packages/89/48/06ee6eabe4fdd9ecd48bf488f4ac783844fd777f547b8d1b61c11939974e/tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b", size = 154819, upload-time = "2025-10-08T22:01:17.964Z" }, - { url = "https://files.pythonhosted.org/packages/f1/01/88793757d54d8937015c75dcdfb673c65471945f6be98e6a0410fba167ed/tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae", size = 148766, upload-time = "2025-10-08T22:01:18.959Z" }, - { url = "https://files.pythonhosted.org/packages/42/17/5e2c956f0144b812e7e107f94f1cc54af734eb17b5191c0bbfb72de5e93e/tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b", size = 240771, upload-time = "2025-10-08T22:01:20.106Z" }, - { url = "https://files.pythonhosted.org/packages/d5/f4/0fbd014909748706c01d16824eadb0307115f9562a15cbb012cd9b3512c5/tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf", size = 248586, upload-time = "2025-10-08T22:01:21.164Z" }, - { url = "https://files.pythonhosted.org/packages/30/77/fed85e114bde5e81ecf9bc5da0cc69f2914b38f4708c80ae67d0c10180c5/tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f", size = 244792, upload-time = "2025-10-08T22:01:22.417Z" }, - { url = 
"https://files.pythonhosted.org/packages/55/92/afed3d497f7c186dc71e6ee6d4fcb0acfa5f7d0a1a2878f8beae379ae0cc/tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05", size = 248909, upload-time = "2025-10-08T22:01:23.859Z" }, - { url = "https://files.pythonhosted.org/packages/f8/84/ef50c51b5a9472e7265ce1ffc7f24cd4023d289e109f669bdb1553f6a7c2/tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606", size = 96946, upload-time = "2025-10-08T22:01:24.893Z" }, - { url = "https://files.pythonhosted.org/packages/b2/b7/718cd1da0884f281f95ccfa3a6cc572d30053cba64603f79d431d3c9b61b/tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999", size = 107705, upload-time = "2025-10-08T22:01:26.153Z" }, - { url = "https://files.pythonhosted.org/packages/19/94/aeafa14a52e16163008060506fcb6aa1949d13548d13752171a755c65611/tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e", size = 154244, upload-time = "2025-10-08T22:01:27.06Z" }, - { url = "https://files.pythonhosted.org/packages/db/e4/1e58409aa78eefa47ccd19779fc6f36787edbe7d4cd330eeeedb33a4515b/tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3", size = 148637, upload-time = "2025-10-08T22:01:28.059Z" }, - { url = "https://files.pythonhosted.org/packages/26/b6/d1eccb62f665e44359226811064596dd6a366ea1f985839c566cd61525ae/tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc", size = 241925, upload-time = "2025-10-08T22:01:29.066Z" }, - { url = 
"https://files.pythonhosted.org/packages/70/91/7cdab9a03e6d3d2bb11beae108da5bdc1c34bdeb06e21163482544ddcc90/tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0", size = 249045, upload-time = "2025-10-08T22:01:31.98Z" }, - { url = "https://files.pythonhosted.org/packages/15/1b/8c26874ed1f6e4f1fcfeb868db8a794cbe9f227299402db58cfcc858766c/tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879", size = 245835, upload-time = "2025-10-08T22:01:32.989Z" }, - { url = "https://files.pythonhosted.org/packages/fd/42/8e3c6a9a4b1a1360c1a2a39f0b972cef2cc9ebd56025168c4137192a9321/tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005", size = 253109, upload-time = "2025-10-08T22:01:34.052Z" }, - { url = "https://files.pythonhosted.org/packages/22/0c/b4da635000a71b5f80130937eeac12e686eefb376b8dee113b4a582bba42/tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463", size = 97930, upload-time = "2025-10-08T22:01:35.082Z" }, - { url = "https://files.pythonhosted.org/packages/b9/74/cb1abc870a418ae99cd5c9547d6bce30701a954e0e721821df483ef7223c/tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8", size = 107964, upload-time = "2025-10-08T22:01:36.057Z" }, - { url = "https://files.pythonhosted.org/packages/54/78/5c46fff6432a712af9f792944f4fcd7067d8823157949f4e40c56b8b3c83/tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77", size = 163065, upload-time = "2025-10-08T22:01:37.27Z" }, - { url = 
"https://files.pythonhosted.org/packages/39/67/f85d9bd23182f45eca8939cd2bc7050e1f90c41f4a2ecbbd5963a1d1c486/tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf", size = 159088, upload-time = "2025-10-08T22:01:38.235Z" }, - { url = "https://files.pythonhosted.org/packages/26/5a/4b546a0405b9cc0659b399f12b6adb750757baf04250b148d3c5059fc4eb/tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530", size = 268193, upload-time = "2025-10-08T22:01:39.712Z" }, - { url = "https://files.pythonhosted.org/packages/42/4f/2c12a72ae22cf7b59a7fe75b3465b7aba40ea9145d026ba41cb382075b0e/tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b", size = 275488, upload-time = "2025-10-08T22:01:40.773Z" }, - { url = "https://files.pythonhosted.org/packages/92/04/a038d65dbe160c3aa5a624e93ad98111090f6804027d474ba9c37c8ae186/tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67", size = 272669, upload-time = "2025-10-08T22:01:41.824Z" }, - { url = "https://files.pythonhosted.org/packages/be/2f/8b7c60a9d1612a7cbc39ffcca4f21a73bf368a80fc25bccf8253e2563267/tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f", size = 279709, upload-time = "2025-10-08T22:01:43.177Z" }, - { url = "https://files.pythonhosted.org/packages/7e/46/cc36c679f09f27ded940281c38607716c86cf8ba4a518d524e349c8b4874/tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0", size = 107563, upload-time = "2025-10-08T22:01:44.233Z" }, - { url = 
"https://files.pythonhosted.org/packages/84/ff/426ca8683cf7b753614480484f6437f568fd2fda2edbdf57a2d3d8b27a0b/tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba", size = 119756, upload-time = "2025-10-08T22:01:45.234Z" }, - { url = "https://files.pythonhosted.org/packages/77/b8/0135fadc89e73be292b473cb820b4f5a08197779206b33191e801feeae40/tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b", size = 14408, upload-time = "2025-10-08T22:01:46.04Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/82/30/31573e9457673ab10aa432461bee537ce6cef177667deca369efb79df071/tomli-2.4.0.tar.gz", hash = "sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c", size = 17477, upload-time = "2026-01-11T11:22:38.165Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/d9/3dc2289e1f3b32eb19b9785b6a006b28ee99acb37d1d47f78d4c10e28bf8/tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867", size = 153663, upload-time = "2026-01-11T11:21:45.27Z" }, + { url = "https://files.pythonhosted.org/packages/51/32/ef9f6845e6b9ca392cd3f64f9ec185cc6f09f0a2df3db08cbe8809d1d435/tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9", size = 148469, upload-time = "2026-01-11T11:21:46.873Z" }, + { url = "https://files.pythonhosted.org/packages/d6/c2/506e44cce89a8b1b1e047d64bd495c22c9f71f21e05f380f1a950dd9c217/tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95", size = 236039, upload-time = "2026-01-11T11:21:48.503Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/40/e1b65986dbc861b7e986e8ec394598187fa8aee85b1650b01dd925ca0be8/tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76", size = 243007, upload-time = "2026-01-11T11:21:49.456Z" }, + { url = "https://files.pythonhosted.org/packages/9c/6f/6e39ce66b58a5b7ae572a0f4352ff40c71e8573633deda43f6a379d56b3e/tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d", size = 240875, upload-time = "2026-01-11T11:21:50.755Z" }, + { url = "https://files.pythonhosted.org/packages/aa/ad/cb089cb190487caa80204d503c7fd0f4d443f90b95cf4ef5cf5aa0f439b0/tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576", size = 246271, upload-time = "2026-01-11T11:21:51.81Z" }, + { url = "https://files.pythonhosted.org/packages/0b/63/69125220e47fd7a3a27fd0de0c6398c89432fec41bc739823bcc66506af6/tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a", size = 96770, upload-time = "2026-01-11T11:21:52.647Z" }, + { url = "https://files.pythonhosted.org/packages/1e/0d/a22bb6c83f83386b0008425a6cd1fa1c14b5f3dd4bad05e98cf3dbbf4a64/tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa", size = 107626, upload-time = "2026-01-11T11:21:53.459Z" }, + { url = "https://files.pythonhosted.org/packages/2f/6d/77be674a3485e75cacbf2ddba2b146911477bd887dda9d8c9dfb2f15e871/tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614", size = 94842, upload-time = "2026-01-11T11:21:54.831Z" }, + { url = 
"https://files.pythonhosted.org/packages/3c/43/7389a1869f2f26dba52404e1ef13b4784b6b37dac93bac53457e3ff24ca3/tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1", size = 154894, upload-time = "2026-01-11T11:21:56.07Z" }, + { url = "https://files.pythonhosted.org/packages/e9/05/2f9bf110b5294132b2edf13fe6ca6ae456204f3d749f623307cbb7a946f2/tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8", size = 149053, upload-time = "2026-01-11T11:21:57.467Z" }, + { url = "https://files.pythonhosted.org/packages/e8/41/1eda3ca1abc6f6154a8db4d714a4d35c4ad90adc0bcf700657291593fbf3/tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a", size = 243481, upload-time = "2026-01-11T11:21:58.661Z" }, + { url = "https://files.pythonhosted.org/packages/d2/6d/02ff5ab6c8868b41e7d4b987ce2b5f6a51d3335a70aa144edd999e055a01/tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1", size = 251720, upload-time = "2026-01-11T11:22:00.178Z" }, + { url = "https://files.pythonhosted.org/packages/7b/57/0405c59a909c45d5b6f146107c6d997825aa87568b042042f7a9c0afed34/tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b", size = 247014, upload-time = "2026-01-11T11:22:01.238Z" }, + { url = "https://files.pythonhosted.org/packages/2c/0e/2e37568edd944b4165735687cbaf2fe3648129e440c26d02223672ee0630/tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51", size = 251820, upload-time = "2026-01-11T11:22:02.727Z" }, + { url = 
"https://files.pythonhosted.org/packages/5a/1c/ee3b707fdac82aeeb92d1a113f803cf6d0f37bdca0849cb489553e1f417a/tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729", size = 97712, upload-time = "2026-01-11T11:22:03.777Z" }, + { url = "https://files.pythonhosted.org/packages/69/13/c07a9177d0b3bab7913299b9278845fc6eaaca14a02667c6be0b0a2270c8/tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da", size = 108296, upload-time = "2026-01-11T11:22:04.86Z" }, + { url = "https://files.pythonhosted.org/packages/18/27/e267a60bbeeee343bcc279bb9e8fbed0cbe224bc7b2a3dc2975f22809a09/tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3", size = 94553, upload-time = "2026-01-11T11:22:05.854Z" }, + { url = "https://files.pythonhosted.org/packages/34/91/7f65f9809f2936e1f4ce6268ae1903074563603b2a2bd969ebbda802744f/tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0", size = 154915, upload-time = "2026-01-11T11:22:06.703Z" }, + { url = "https://files.pythonhosted.org/packages/20/aa/64dd73a5a849c2e8f216b755599c511badde80e91e9bc2271baa7b2cdbb1/tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e", size = 149038, upload-time = "2026-01-11T11:22:07.56Z" }, + { url = "https://files.pythonhosted.org/packages/9e/8a/6d38870bd3d52c8d1505ce054469a73f73a0fe62c0eaf5dddf61447e32fa/tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4", size = 242245, upload-time = "2026-01-11T11:22:08.344Z" }, + { url = 
"https://files.pythonhosted.org/packages/59/bb/8002fadefb64ab2669e5b977df3f5e444febea60e717e755b38bb7c41029/tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e", size = 250335, upload-time = "2026-01-11T11:22:09.951Z" }, + { url = "https://files.pythonhosted.org/packages/a5/3d/4cdb6f791682b2ea916af2de96121b3cb1284d7c203d97d92d6003e91c8d/tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c", size = 245962, upload-time = "2026-01-11T11:22:11.27Z" }, + { url = "https://files.pythonhosted.org/packages/f2/4a/5f25789f9a460bd858ba9756ff52d0830d825b458e13f754952dd15fb7bb/tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f", size = 250396, upload-time = "2026-01-11T11:22:12.325Z" }, + { url = "https://files.pythonhosted.org/packages/aa/2f/b73a36fea58dfa08e8b3a268750e6853a6aac2a349241a905ebd86f3047a/tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86", size = 97530, upload-time = "2026-01-11T11:22:13.865Z" }, + { url = "https://files.pythonhosted.org/packages/3b/af/ca18c134b5d75de7e8dc551c5234eaba2e8e951f6b30139599b53de9c187/tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87", size = 108227, upload-time = "2026-01-11T11:22:15.224Z" }, + { url = "https://files.pythonhosted.org/packages/22/c3/b386b832f209fee8073c8138ec50f27b4460db2fdae9ffe022df89a57f9b/tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132", size = 94748, upload-time = "2026-01-11T11:22:16.009Z" }, + { url = 
"https://files.pythonhosted.org/packages/f3/c4/84047a97eb1004418bc10bdbcfebda209fca6338002eba2dc27cc6d13563/tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6", size = 154725, upload-time = "2026-01-11T11:22:17.269Z" }, + { url = "https://files.pythonhosted.org/packages/a8/5d/d39038e646060b9d76274078cddf146ced86dc2b9e8bbf737ad5983609a0/tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc", size = 148901, upload-time = "2026-01-11T11:22:18.287Z" }, + { url = "https://files.pythonhosted.org/packages/73/e5/383be1724cb30f4ce44983d249645684a48c435e1cd4f8b5cded8a816d3c/tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66", size = 243375, upload-time = "2026-01-11T11:22:19.154Z" }, + { url = "https://files.pythonhosted.org/packages/31/f0/bea80c17971c8d16d3cc109dc3585b0f2ce1036b5f4a8a183789023574f2/tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d", size = 250639, upload-time = "2026-01-11T11:22:20.168Z" }, + { url = "https://files.pythonhosted.org/packages/2c/8f/2853c36abbb7608e3f945d8a74e32ed3a74ee3a1f468f1ffc7d1cb3abba6/tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702", size = 246897, upload-time = "2026-01-11T11:22:21.544Z" }, + { url = "https://files.pythonhosted.org/packages/49/f0/6c05e3196ed5337b9fe7ea003e95fd3819a840b7a0f2bf5a408ef1dad8ed/tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8", size = 254697, upload-time = "2026-01-11T11:22:23.058Z" }, + { url = 
"https://files.pythonhosted.org/packages/f3/f5/2922ef29c9f2951883525def7429967fc4d8208494e5ab524234f06b688b/tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776", size = 98567, upload-time = "2026-01-11T11:22:24.033Z" }, + { url = "https://files.pythonhosted.org/packages/7b/31/22b52e2e06dd2a5fdbc3ee73226d763b184ff21fc24e20316a44ccc4d96b/tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475", size = 108556, upload-time = "2026-01-11T11:22:25.378Z" }, + { url = "https://files.pythonhosted.org/packages/48/3d/5058dff3255a3d01b705413f64f4306a141a8fd7a251e5a495e3f192a998/tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2", size = 96014, upload-time = "2026-01-11T11:22:26.138Z" }, + { url = "https://files.pythonhosted.org/packages/b8/4e/75dab8586e268424202d3a1997ef6014919c941b50642a1682df43204c22/tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9", size = 163339, upload-time = "2026-01-11T11:22:27.143Z" }, + { url = "https://files.pythonhosted.org/packages/06/e3/b904d9ab1016829a776d97f163f183a48be6a4deb87304d1e0116a349519/tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0", size = 159490, upload-time = "2026-01-11T11:22:28.399Z" }, + { url = "https://files.pythonhosted.org/packages/e3/5a/fc3622c8b1ad823e8ea98a35e3c632ee316d48f66f80f9708ceb4f2a0322/tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df", size = 269398, upload-time = "2026-01-11T11:22:29.345Z" }, + { url = 
"https://files.pythonhosted.org/packages/fd/33/62bd6152c8bdd4c305ad9faca48f51d3acb2df1f8791b1477d46ff86e7f8/tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d", size = 276515, upload-time = "2026-01-11T11:22:30.327Z" }, + { url = "https://files.pythonhosted.org/packages/4b/ff/ae53619499f5235ee4211e62a8d7982ba9e439a0fb4f2f351a93d67c1dd2/tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f", size = 273806, upload-time = "2026-01-11T11:22:32.56Z" }, + { url = "https://files.pythonhosted.org/packages/47/71/cbca7787fa68d4d0a9f7072821980b39fbb1b6faeb5f5cf02f4a5559fa28/tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b", size = 281340, upload-time = "2026-01-11T11:22:33.505Z" }, + { url = "https://files.pythonhosted.org/packages/f5/00/d595c120963ad42474cf6ee7771ad0d0e8a49d0f01e29576ee9195d9ecdf/tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087", size = 108106, upload-time = "2026-01-11T11:22:34.451Z" }, + { url = "https://files.pythonhosted.org/packages/de/69/9aa0c6a505c2f80e519b43764f8b4ba93b5a0bbd2d9a9de6e2b24271b9a5/tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd", size = 120504, upload-time = "2026-01-11T11:22:35.764Z" }, + { url = "https://files.pythonhosted.org/packages/b3/9f/f1668c281c58cfae01482f7114a4b88d345e4c140386241a1a24dcc9e7bc/tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4", size = 99561, upload-time = "2026-01-11T11:22:36.624Z" }, + { url = 
"https://files.pythonhosted.org/packages/23/d1/136eb2cb77520a31e1f64cbae9d33ec6df0d78bdf4160398e86eec8a8754/tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a", size = 14477, upload-time = "2026-01-11T11:22:37.446Z" }, ] [[package]] name = "tqdm" -version = "4.67.1" +version = "4.67.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" } +sdist = { url = "https://files.pythonhosted.org/packages/27/89/4b0001b2dab8df0a5ee2787dcbe771de75ded01f18f1f8d53dedeea2882b/tqdm-4.67.2.tar.gz", hash = "sha256:649aac53964b2cb8dec76a14b405a4c0d13612cb8933aae547dd144eacc99653", size = 169514, upload-time = "2026-01-30T23:12:06.555Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, + { url = "https://files.pythonhosted.org/packages/f5/e2/31eac96de2915cf20ccaed0225035db149dfb9165a9ed28d4b252ef3f7f7/tqdm-4.67.2-py3-none-any.whl", hash = "sha256:9a12abcbbff58b6036b2167d9d3853042b9d436fe7330f06ae047867f2f8e0a7", size = 78354, upload-time = "2026-01-30T23:12:04.368Z" }, ] [[package]] name = "transformers" -version = "4.57.3" +version = "5.0.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock" }, @@ -2891,19 +2919,19 @@ dependencies = [ { name = "packaging" }, { name = "pyyaml" }, { name = "regex" }, - { name = "requests" }, { name = "safetensors" }, { name = "tokenizers" }, { name = "tqdm" 
}, + { name = "typer-slim" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/dd/70/d42a739e8dfde3d92bb2fff5819cbf331fe9657323221e79415cd5eb65ee/transformers-4.57.3.tar.gz", hash = "sha256:df4945029aaddd7c09eec5cad851f30662f8bd1746721b34cc031d70c65afebc", size = 10139680, upload-time = "2025-11-25T15:51:30.139Z" } +sdist = { url = "https://files.pythonhosted.org/packages/bc/79/845941711811789c85fb7e2599cea425a14a07eda40f50896b9d3fda7492/transformers-5.0.0.tar.gz", hash = "sha256:5f5634efed6cf76ad068cc5834c7adbc32db78bbd6211fb70df2325a9c37dec8", size = 8424830, upload-time = "2026-01-26T10:46:46.813Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6a/6b/2f416568b3c4c91c96e5a365d164f8a4a4a88030aa8ab4644181fdadce97/transformers-4.57.3-py3-none-any.whl", hash = "sha256:c77d353a4851b1880191603d36acb313411d3577f6e2897814f333841f7003f4", size = 11993463, upload-time = "2025-11-25T15:51:26.493Z" }, + { url = "https://files.pythonhosted.org/packages/52/f3/ac976fa8e305c9e49772527e09fbdc27cc6831b8a2f6b6063406626be5dd/transformers-5.0.0-py3-none-any.whl", hash = "sha256:587086f249ce64c817213cf36afdb318d087f790723e9b3d4500b97832afd52d", size = 10142091, upload-time = "2026-01-26T10:46:43.88Z" }, ] [[package]] name = "typer" -version = "0.20.0" +version = "0.21.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, @@ -2911,9 +2939,22 @@ dependencies = [ { name = "shellingham" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/8f/28/7c85c8032b91dbe79725b6f17d2fffc595dff06a35c7a30a37bef73a1ab4/typer-0.20.0.tar.gz", hash = "sha256:1aaf6494031793e4876fb0bacfa6a912b551cf43c1e63c800df8b1a866720c37", size = 106492, upload-time = "2025-10-20T17:03:49.445Z" } +sdist = { url = "https://files.pythonhosted.org/packages/36/bf/8825b5929afd84d0dabd606c67cd57b8388cb3ec385f7ef19c5cc2202069/typer-0.21.1.tar.gz", hash = 
"sha256:ea835607cd752343b6b2b7ce676893e5a0324082268b48f27aa058bdb7d2145d", size = 110371, upload-time = "2026-01-06T11:21:10.989Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/78/64/7713ffe4b5983314e9d436a90d5bd4f63b6054e2aca783a3cfc44cb95bbf/typer-0.20.0-py3-none-any.whl", hash = "sha256:5b463df6793ec1dca6213a3cf4c0f03bc6e322ac5e16e13ddd622a889489784a", size = 47028, upload-time = "2025-10-20T17:03:47.617Z" }, + { url = "https://files.pythonhosted.org/packages/a0/1d/d9257dd49ff2ca23ea5f132edf1281a0c4f9de8a762b9ae399b670a59235/typer-0.21.1-py3-none-any.whl", hash = "sha256:7985e89081c636b88d172c2ee0cfe33c253160994d47bdfdc302defd7d1f1d01", size = 47381, upload-time = "2026-01-06T11:21:09.824Z" }, +] + +[[package]] +name = "typer-slim" +version = "0.21.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/17/d4/064570dec6358aa9049d4708e4a10407d74c99258f8b2136bb8702303f1a/typer_slim-0.21.1.tar.gz", hash = "sha256:73495dd08c2d0940d611c5a8c04e91c2a0a98600cbd4ee19192255a233b6dbfd", size = 110478, upload-time = "2026-01-06T11:21:11.176Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/0a/4aca634faf693e33004796b6cee0ae2e1dba375a800c16ab8d3eff4bb800/typer_slim-0.21.1-py3-none-any.whl", hash = "sha256:6e6c31047f171ac93cc5a973c9e617dbc5ab2bddc4d0a3135dc161b4e2020e0d", size = 47444, upload-time = "2026-01-06T11:21:12.441Z" }, ] [[package]] @@ -2939,11 +2980,11 @@ wheels = [ [[package]] name = "tzdata" -version = "2025.2" +version = "2025.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380, upload-time = "2025-03-23T13:54:43.652Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/5e/a7/c202b344c5ca7daf398f3b8a477eeb205cf3b6f32e7ec3a6bac0629ca975/tzdata-2025.3.tar.gz", hash = "sha256:de39c2ca5dc7b0344f2eba86f49d614019d29f060fc4ebc8a417896a620b56a7", size = 196772, upload-time = "2025-12-13T17:45:35.667Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" }, + { url = "https://files.pythonhosted.org/packages/c7/b0/003792df09decd6849a5e39c28b513c06e84436a54440380862b5aeff25d/tzdata-2025.3-py2.py3-none-any.whl", hash = "sha256:06a47e5700f3081aab02b2e513160914ff0694bce9947d6b76ebd6bf57cfc5d1", size = 348521, upload-time = "2025-12-13T17:45:33.889Z" }, ] [[package]] @@ -3017,11 +3058,11 @@ wheels = [ [[package]] name = "urllib3" -version = "2.5.0" +version = "2.6.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size = 435556, upload-time = "2026-01-07T16:24:43.925Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, + { url = 
"https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" }, ] [[package]] @@ -3032,11 +3073,11 @@ sdist = { url = "https://files.pythonhosted.org/packages/ce/63/f42f5aa951ebf2c8d [[package]] name = "wcwidth" -version = "0.2.14" +version = "0.5.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/24/30/6b0809f4510673dc723187aeaf24c7f5459922d01e2f794277a3dfb90345/wcwidth-0.2.14.tar.gz", hash = "sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605", size = 102293, upload-time = "2025-09-22T16:29:53.023Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c2/62/a7c072fbfefb2980a00f99ca994279cb9ecf310cb2e6b2a4d2a28fe192b3/wcwidth-0.5.3.tar.gz", hash = "sha256:53123b7af053c74e9fe2e92ac810301f6139e64379031f7124574212fb3b4091", size = 157587, upload-time = "2026-01-31T03:52:10.92Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1", size = 37286, upload-time = "2025-09-22T16:29:51.641Z" }, + { url = "https://files.pythonhosted.org/packages/3c/c1/d73f12f8cdb1891334a2ccf7389eed244d3941e74d80dd220badb937f3fb/wcwidth-0.5.3-py3-none-any.whl", hash = "sha256:d584eff31cd4753e1e5ff6c12e1edfdb324c995713f75d26c29807bb84bf649e", size = 92981, upload-time = "2026-01-31T03:52:09.14Z" }, ] [[package]]