diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..4ae057d --- /dev/null +++ b/.dockerignore @@ -0,0 +1,13 @@ +.venv/ +.env +.git/ +.github/ +.pytest_cache/ +__pycache__/ +*.pyc +tests/ +docs/ +.ruff_cache/ +.mypy_cache/ +htmlcov/ +*.egg-info/ diff --git a/.env.example b/.env.example index 363be8d..9275f9d 100644 --- a/.env.example +++ b/.env.example @@ -1,6 +1,7 @@ # Proxmox Connection PROXMOX_HOST=192.168.1.100 PROXMOX_PORT=8006 +# WARNING: Defaults to false (allows self-signed certs). Set true for production. PROXMOX_VERIFY_SSL=false # Auth Option 1: API Token (preferred) @@ -15,7 +16,14 @@ PROXMOX_TOKEN_VALUE=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx PROXMOX_DRY_RUN=false PROXMOX_ALLOWED_NODES= PROXMOX_PROTECTED_VMIDS= -PROXMOX_MAX_CONCURRENT_TASKS=5 + +# SSH Configuration (for disk management tools) +PROXMOX_SSH_USER=root +PROXMOX_SSH_PORT=22 +# PROXMOX_SSH_KEY_PATH=~/.ssh/id_rsa # Path to SSH private key (recommended) +# PROXMOX_SSH_PASSWORD= # SSH password. Falls back to PROXMOX_PASSWORD if empty. 
+PROXMOX_SSH_HOST_KEY_CHECKING=true +# PROXMOX_SSH_KNOWN_HOSTS= # Path to known_hosts file (default: ~/.ssh/known_hosts) # Server MCP_TRANSPORT=stdio diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..adad2d9 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,35 @@ +name: CI + +on: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v4 + - run: uv python install 3.12 + - run: uv sync --dev + - run: uv run ruff check src/ tests/ + - run: uv run ruff format --check src/ tests/ + + test: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.11", "3.12", "3.13"] + steps: + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v4 + - run: uv python install ${{ matrix.python-version }} + - run: uv sync --dev + - name: Run tests + run: uv run pytest tests/ -v --cov=src/proxmox_mcp --cov-report=term-missing + env: + PROXMOX_HOST: "test" + PROXMOX_TOKEN_NAME: "test@pam!ci" + PROXMOX_TOKEN_VALUE: "00000000-0000-0000-0000-000000000000" diff --git a/CLAUDE.md b/CLAUDE.md index 8057653..b95ac75 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -4,7 +4,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co ## Project Overview -MCP (Model Context Protocol) server for Proxmox VE infrastructure management. Exposes 58 tools, 10 resources, and 6 prompts via FastMCP. Python 3.11+, async-first design wrapping the synchronous `proxmoxer` library with `asyncio.to_thread()`. +MCP (Model Context Protocol) server for Proxmox VE infrastructure management. Exposes 91 tools, 10 resources, and 6 prompts via FastMCP. Python 3.11+, async-first design wrapping the synchronous `proxmoxer` library with `asyncio.to_thread()`. ## Commands @@ -40,11 +40,12 @@ mypy src/proxmox_mcp # Type check - **server.py** — Creates `mcp` (FastMCP) and `proxmox_client` (ProxmoxClient) at module level. 
Tool/resource/prompt modules import these and register via decorators. Side-effect imports at bottom of file register everything with mcp. - **config.py** — `ProxmoxConfig(BaseSettings)` loads from `.env`. Uses `model_validator` to parse comma-separated env vars into typed lists (protected_vmids, allowed_nodes). -- **client.py** — `ProxmoxClient` wraps proxmoxer. Key safety methods: `check_protected()`, `validate_node()`, `resolve_node_for_vmid()`, `dry_run_response()`. All Proxmox API calls go through `api_call()` which runs sync proxmoxer in a thread. -- **tools/** — 8 domain modules (cluster, node, vm, container, storage, task, backup, network). Each has local `get_mcp()`/`get_client()` helpers that import from `server.py`. +- **client.py** — `ProxmoxClient` wraps proxmoxer. Key safety methods: `check_protected()`, `validate_node()`, `resolve_node()` (validates + auto-detects node for VMID), `dry_run_response()`. All Proxmox API calls go through `api_call()` which runs sync proxmoxer in a thread. +- **tools/** — 9 domain modules (cluster, node, vm, container, storage, task, backup, network, disk). Each has local `get_mcp()`/`get_client()` helpers that import from `server.py`. - **resources/resources.py** — 10 read-only `proxmox://` URI resources returning JSON strings. - **prompts/prompts.py** — 6 workflow prompt templates. -- **utils/** — `errors.py` (exception hierarchy), `validators.py` (validate_vmid, validate_node_name), `formatters.py` (format_vm_summary, format_bytes, etc.) +- **utils/** — `errors.py` (exception hierarchy), `validators.py` (validate_vmid, validate_node_name), `sanitizers.py` (input sanitization for disk/shell params), `formatters.py` (format_vm_summary, format_bytes, etc.) +- **ssh.py** — `SSHExecutor` for remote command execution via paramiko (used by disk tools). 
### Conventions diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..bd32652 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,23 @@ +FROM python:3.12-slim AS builder + +WORKDIR /app + +COPY pyproject.toml . +COPY src/ src/ + +RUN pip install --no-cache-dir . + +FROM python:3.12-slim + +WORKDIR /app + +COPY --from=builder /usr/local/lib/python3.12/site-packages /usr/local/lib/python3.12/site-packages +COPY --from=builder /usr/local/bin/proxmox-mcp /usr/local/bin/proxmox-mcp +COPY src/ src/ + +ENV MCP_TRANSPORT=streamable-http +ENV MCP_HTTP_PORT=3001 + +EXPOSE 3001 + +CMD ["proxmox-mcp"] diff --git a/README.md b/README.md index 186058f..cb8ab1a 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,285 @@ # ProxmoxMCP +MCP (Model Context Protocol) server for managing Proxmox VE infrastructure through AI assistants like Claude. Exposes **91 tools**, **10 resources**, and **6 prompt templates** via FastMCP. + +## Features + +- **91 tools** across 9 domains: VMs, containers, storage, networking, backups, cluster, nodes, tasks, and disk management +- **Safety guards**: protected VMIDs, node allowlists, dry-run mode, confirmation prompts for destructive operations +- **Async-first**: all Proxmox API calls run via `asyncio.to_thread()` for non-blocking operation +- **SSH disk management**: partition, format, mount, and unmount physical disks via SSH +- **Structured JSON responses**: consistent `{"status": "success"}` / `{"status": "error"}` format + +## Quick Start + +### 1. Install + +```bash +pip install -e . +``` + +Or with development dependencies: + +```bash +pip install -e ".[dev]" +``` + +### 2. Configure + +Copy `.env.example` to `.env` and set your Proxmox connection details: + +```bash +cp .env.example .env +``` + +Required settings: + +```env +PROXMOX_HOST=192.168.1.100 +PROXMOX_TOKEN_NAME=root@pam!mcp-token +PROXMOX_TOKEN_VALUE=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +``` + +### 3. 
Run + +```bash +# CLI entry point +proxmox-mcp + +# Or as a module +python -m proxmox_mcp +``` + +### 4. Connect to Claude Desktop + +Add to your Claude Desktop config (`claude_desktop_config.json`): + +```json +{ + "mcpServers": { + "proxmox": { + "command": "proxmox-mcp", + "env": { + "PROXMOX_HOST": "192.168.1.100", + "PROXMOX_TOKEN_NAME": "root@pam!mcp-token", + "PROXMOX_TOKEN_VALUE": "your-token-value" + } + } + } +} +``` + +## Tool Inventory + +### VM Management (20 tools) + +| Tool | Description | +|------|-------------| +| `list_vms` | List all QEMU VMs across the cluster | +| `get_vm_status` | Get detailed VM status | +| `get_vm_config` | Get full VM configuration | +| `get_vm_rrd_data` | Get VM performance metrics over time | +| `start_vm` | Start a stopped VM | +| `stop_vm` | Hard stop a VM | +| `shutdown_vm` | Graceful ACPI shutdown | +| `reboot_vm` | Reboot a VM via ACPI | +| `suspend_vm` | Suspend/pause a running VM | +| `resume_vm` | Resume a suspended VM | +| `reset_vm` | Hard reset a VM | +| `clone_vm` | Clone a VM (full or linked) | +| `migrate_vm` | Live-migrate a VM to another node | +| `create_vm` | Create a new QEMU VM | +| `delete_vm` | Permanently delete a VM | +| `resize_vm_disk` | Resize a VM disk | +| `convert_vm_to_template` | Convert a VM to a template | +| `modify_vm_config` | Modify VM configuration | +| `set_vm_cloudinit` | Configure cloud-init settings | +| `regenerate_cloudinit_image` | Regenerate cloud-init drive | + +### Container Management (12 tools) + +| Tool | Description | +|------|-------------| +| `list_containers` | List all LXC containers | +| `get_container_status` | Get container status | +| `get_container_config` | Get container configuration | +| `start_container` | Start a container | +| `stop_container` | Stop a container | +| `shutdown_container` | Graceful container shutdown | +| `reboot_container` | Reboot a container | +| `clone_container` | Clone a container | +| `migrate_container` | Migrate a container | +| 
`create_container` | Create a new container | +| `delete_container` | Delete a container | +| `modify_container_config` | Modify container config | + +### Cluster & Access Management (17 tools) + +| Tool | Description | +|------|-------------| +| `get_cluster_status` | Cluster status and quorum | +| `get_cluster_resources` | All resources across the cluster | +| `get_cluster_log` | Recent cluster log entries | +| `get_next_vmid` | Next available VMID | +| `list_pools` | List resource pools | +| `create_pool` | Create a resource pool | +| `modify_pool` | Modify pool membership | +| `delete_pool` | Delete a resource pool | +| `list_users` | List all users | +| `create_user` | Create a new user | +| `delete_user` | Delete a user | +| `list_roles` | List available roles | +| `set_user_permission` | Set ACL permissions | +| `list_ha_resources` | List HA-managed resources | +| `create_ha_resource` | Add VM/CT to HA management | +| `modify_ha_resource` | Modify HA resource settings | +| `delete_ha_resource` | Remove from HA management | + +### Node Management (8 tools) + +| Tool | Description | +|------|-------------| +| `list_nodes` | List all cluster nodes | +| `get_node_status` | Detailed node status | +| `get_node_services` | Node system services | +| `get_node_network` | Node network interfaces | +| `get_node_storage` | Node storage backends | +| `get_node_syslog` | Node syslog entries | +| `reboot_node` | Reboot a node | +| `shutdown_node` | Shut down a node | + +### Storage Management (8 tools) + +| Tool | Description | +|------|-------------| +| `list_storage` | List all storage backends | +| `get_storage_status` | Storage usage details | +| `list_storage_content` | List storage contents | +| `get_available_isos` | List available ISOs | +| `get_available_templates` | List container templates | +| `add_storage` | Register a new storage backend | +| `remove_storage` | Unregister a storage backend | +| `download_to_storage` | Download ISO/template from URL | + +### 
Backup & Snapshot Management (10 tools) + +| Tool | Description | +|------|-------------| +| `create_snapshot` | Create a VM/CT snapshot | +| `list_snapshots` | List all snapshots | +| `rollback_snapshot` | Rollback to a snapshot | +| `delete_snapshot` | Delete a snapshot | +| `create_backup` | Start a vzdump backup | +| `list_backups` | List available backups | +| `restore_backup` | Restore from backup | +| `list_backup_jobs` | List scheduled backup jobs | +| `create_backup_job` | Create a backup schedule | +| `delete_backup_job` | Delete a backup schedule | + +### Network & Firewall (7 tools) + +| Tool | Description | +|------|-------------| +| `get_node_firewall_rules` | List node firewall rules | +| `get_vm_firewall_rules` | List VM/CT firewall rules | +| `get_vm_interfaces` | Get VM/CT network interfaces | +| `create_node_firewall_rule` | Create a node firewall rule | +| `delete_node_firewall_rule` | Delete a node firewall rule | +| `create_vm_firewall_rule` | Create a VM/CT firewall rule | +| `delete_vm_firewall_rule` | Delete a VM/CT firewall rule | + +### Task Tracking (4 tools) + +| Tool | Description | +|------|-------------| +| `list_tasks` | List recent tasks | +| `get_task_status` | Get task status by UPID | +| `get_task_log` | Get task log output | +| `wait_for_task` | Wait for task completion | + +### Disk Management (5 tools, SSH-based) + +| Tool | Description | +|------|-------------| +| `list_physical_disks` | List physical disks on a node | +| `partition_disk` | Create partitions on a disk | +| `format_disk` | Format a partition | +| `create_mount_point` | Mount a filesystem with fstab | +| `unmount_path` | Unmount and clean up fstab | + +## Configuration Reference + +| Variable | Default | Description | +|----------|---------|-------------| +| `PROXMOX_HOST` | *required* | Proxmox host IP or hostname | +| `PROXMOX_PORT` | `8006` | API port | +| `PROXMOX_VERIFY_SSL` | `false` | Verify SSL certificates | +| `PROXMOX_TOKEN_NAME` | | API token name 
(e.g. `root@pam!token`) | +| `PROXMOX_TOKEN_VALUE` | | API token value | +| `PROXMOX_USER` | | Username (fallback auth) | +| `PROXMOX_PASSWORD` | | Password (fallback auth) | +| `PROXMOX_DRY_RUN` | `false` | Simulate write operations | +| `PROXMOX_PROTECTED_VMIDS` | | Comma-separated VMIDs to protect from modification | +| `PROXMOX_ALLOWED_NODES` | | Comma-separated node allowlist (empty = all) | +| `PROXMOX_SSH_USER` | `root` | SSH user for disk tools | +| `PROXMOX_SSH_PORT` | `22` | SSH port | +| `PROXMOX_SSH_KEY_PATH` | | Path to SSH private key | +| `PROXMOX_SSH_HOST_KEY_CHECKING` | `true` | Verify SSH host keys | +| `MCP_TRANSPORT` | `stdio` | Transport: `stdio` or `streamable-http` | +| `MCP_HTTP_PORT` | `3001` | HTTP port (when using streamable-http) | +| `LOG_LEVEL` | `INFO` | Logging level | + +## Safety Features + +- **Protected VMIDs**: VMs in `PROXMOX_PROTECTED_VMIDS` cannot be modified, stopped, or deleted +- **Node allowlist**: When `PROXMOX_ALLOWED_NODES` is set, operations are restricted to listed nodes +- **Dry-run mode**: Set `PROXMOX_DRY_RUN=true` to simulate all write operations +- **Confirmation prompts**: Destructive operations (delete, rollback, restore) require `confirm=True` +- **Input validation**: VMIDs, node names, snapshot names, and storage IDs are validated against injection +- **Config key allowlists**: `modify_vm_config` blocks dangerous keys like `hookscript` and `hostpci` +- **SSH host key verification**: Enabled by default for disk management operations + +## Development + +```bash +# Install with dev dependencies +pip install -e ".[dev]" + +# Run tests +pytest tests/ + +# Run a single test file +pytest tests/test_vm_tools.py + +# Run with coverage +pytest tests/ --cov=src/proxmox_mcp --cov-report=html + +# Lint +ruff check src/ tests/ + +# Format +ruff format src/ tests/ + +# Type check +mypy src/proxmox_mcp +``` + +## Architecture + +``` +FastMCP server + -> @mcp.tool() async function (tools/) + -> ProxmoxClient.api_call() + -> 
asyncio.to_thread(proxmoxer) + -> Proxmox REST API +``` + +- **9 tool modules**: `vm`, `container`, `cluster`, `node`, `storage`, `backup`, `network`, `task`, `disk` +- **ProxmoxClient** (`client.py`): wraps proxmoxer with safety guards and async support +- **Config** (`config.py`): pydantic-settings loading from `.env` +- **Resources**: 10 read-only `proxmox://` URI resources returning cluster state as JSON +- **Prompts**: 6 workflow templates for common operations + +## License + +MIT diff --git a/docs/.DS_Store b/docs/.DS_Store new file mode 100644 index 0000000..5455949 Binary files /dev/null and b/docs/.DS_Store differ diff --git a/docs/PROJECT_REVIEW.md b/docs/PROJECT_REVIEW.md new file mode 100644 index 0000000..de997f2 --- /dev/null +++ b/docs/PROJECT_REVIEW.md @@ -0,0 +1,587 @@ +# ProxmoxMCP — Project Review & Implementation Roadmap + +**Date:** 2026-02-20 +**Branch:** `feature/disk-storage-management` +**Reviewed from:** commit `ac1f0f9` + uncommitted disk management work + +--- + +## Table of Contents + +1. [Project Overview](#1-project-overview) +2. [Complete Tool Inventory](#2-complete-tool-inventory) +3. [Resources & Prompts](#3-resources--prompts) +4. [Architecture Analysis](#4-architecture-analysis) +5. [Security Issues](#5-security-issues) +6. [Test Coverage Analysis](#6-test-coverage-analysis) +7. [Code Quality Issues](#7-code-quality-issues) +8. [Configuration Gaps](#8-configuration-gaps) +9. [Developer Experience](#9-developer-experience) +10. [Missing Proxmox API Coverage](#10-missing-proxmox-api-coverage) +11. [Implementation Roadmap](#11-implementation-roadmap) + +--- + +## 1. Project Overview + +MCP (Model Context Protocol) server for Proxmox VE infrastructure management. Exposes **65 tools**, **10 resources**, and **6 prompts** via FastMCP. Python 3.11+, async-first design wrapping the synchronous `proxmoxer` library with `asyncio.to_thread()`. Disk management tools use SSH via `paramiko`. 
+ +### Tech Stack + +| Component | Technology | +|-----------|-----------| +| MCP Framework | FastMCP | +| Proxmox API | proxmoxer | +| SSH Layer | paramiko | +| Config | pydantic-settings (.env) | +| Testing | pytest (asyncio_mode=auto) | +| Linting | ruff, mypy | +| Python | 3.11+ | + +--- + +## 2. Complete Tool Inventory + +**65 tools** across 9 domain modules. + +> Note: CLAUDE.md currently states "58 tools" — this is outdated after adding disk and storage management tools. + +### VM Management — `tools/vm.py` (16 tools) + +| Tool | Description | +|------|-------------| +| `list_vms` | List all VMs across the cluster | +| `get_vm_status` | Get VM runtime status | +| `get_vm_config` | Get VM configuration | +| `get_vm_rrd_data` | Get VM performance metrics (RRD) | +| `start_vm` | Start a VM | +| `stop_vm` | Force-stop a VM | +| `shutdown_vm` | Graceful ACPI shutdown | +| `reboot_vm` | Reboot a VM | +| `suspend_vm` | Suspend a VM to RAM | +| `resume_vm` | Resume a suspended VM | +| `reset_vm` | Hard-reset a VM | +| `clone_vm` | Clone a VM (full or linked) | +| `migrate_vm` | Live-migrate a VM to another node | +| `create_vm` | Create a new VM | +| `delete_vm` | Delete a VM (with safety checks) | +| `modify_vm_config` | Modify VM configuration | + +### Container Management — `tools/container.py` (12 tools) + +| Tool | Description | +|------|-------------| +| `list_containers` | List all LXC containers | +| `get_container_status` | Get container runtime status | +| `get_container_config` | Get container configuration | +| `start_container` | Start a container | +| `stop_container` | Force-stop a container | +| `shutdown_container` | Graceful shutdown | +| `reboot_container` | Reboot a container | +| `clone_container` | Clone a container | +| `migrate_container` | Migrate a container to another node | +| `create_container` | Create a new LXC container | +| `delete_container` | Delete a container (with safety checks) | +| `modify_container_config` | Modify container 
configuration | + +### Storage Management — `tools/storage.py` (7 tools) + +| Tool | Description | +|------|-------------| +| `list_storage` | List all storage backends | +| `get_storage_status` | Get storage usage/status | +| `list_storage_content` | List contents of a storage | +| `get_available_isos` | List available ISO images | +| `get_available_templates` | List available CT templates | +| `add_storage` | Register a new storage backend | +| `remove_storage` | Unregister a storage backend | + +### Backup & Snapshot — `tools/backup.py` (7 tools) + +| Tool | Description | +|------|-------------| +| `create_snapshot` | Create a VM/CT snapshot | +| `list_snapshots` | List snapshots for a VM/CT | +| `rollback_snapshot` | Rollback to a snapshot | +| `delete_snapshot` | Delete a snapshot | +| `create_backup` | Create a vzdump backup | +| `list_backups` | List available backups | +| `restore_backup` | Restore from a backup | + +### Node Management — `tools/node.py` (6 tools) + +| Tool | Description | +|------|-------------| +| `list_nodes` | List all cluster nodes with status | +| `get_node_status` | Get detailed node status | +| `get_node_services` | List services on a node | +| `get_node_network` | Get network interface info | +| `get_node_storage` | Get local storage info | +| `get_node_syslog` | Read node system log | + +### Disk Management — `tools/disk.py` (5 tools, SSH-based) + +| Tool | Description | +|------|-------------| +| `list_physical_disks` | List physical block devices on a node | +| `partition_disk` | Create a single partition (GPT/MBR) | +| `format_disk` | Format a partition with ext4/xfs/vfat | +| `create_mount_point` | Mount + fstab entry + optional Proxmox storage registration | +| `unmount_path` | Unmount + remove fstab entry | + +### Cluster Management — `tools/cluster.py` (5 tools) + +| Tool | Description | +|------|-------------| +| `get_cluster_status` | Get cluster status and quorum | +| `get_cluster_resources` | List all cluster resources 
| +| `get_cluster_log` | Read cluster log | +| `get_next_vmid` | Get next available VMID | +| `list_pools` | List resource pools | + +### Task Management — `tools/task.py` (4 tools) + +| Tool | Description | +|------|-------------| +| `list_tasks` | List recent tasks on a node | +| `get_task_status` | Get task status by UPID | +| `get_task_log` | Read task log output | +| `wait_for_task` | Poll until a task completes | + +### Network/Firewall — `tools/network.py` (3 tools) + +| Tool | Description | +|------|-------------| +| `get_node_firewall_rules` | List node firewall rules | +| `get_vm_firewall_rules` | List VM firewall rules | +| `get_vm_interfaces` | Get VM network interfaces | + +--- + +## 3. Resources & Prompts + +### Resources (10) + +All resources are read-only and return JSON strings via `proxmox://` URIs. + +| URI | Description | +|-----|-------------| +| `proxmox://cluster/status` | Cluster status | +| `proxmox://cluster/resources` | All cluster resources | +| `proxmox://nodes` | Node list | +| `proxmox://node/{node}/status` | Node detail | +| `proxmox://vms` | All VMs | +| `proxmox://containers` | All containers | +| `proxmox://vm/{vmid}` | VM detail | +| `proxmox://container/{vmid}` | Container detail | +| `proxmox://storage` | Storage overview | +| `proxmox://tasks/recent` | Recent tasks | + +### Prompts (6) + +| Prompt | Parameters | Purpose | +|--------|------------|---------| +| `infrastructure_overview` | None | Full infrastructure summary | +| `capacity_planning` | None | Resource capacity analysis | +| `vm_deployment` | name, purpose, os | VM deployment guide | +| `disaster_recovery_check` | None | DR readiness check | +| `security_audit` | None | Security posture review | +| `troubleshoot_vm` | vmid | VM troubleshooting guide | + +--- + +## 4. Architecture Analysis + +### Request Flow + +``` +MCP Client (Claude, etc.) 
+ | + v +FastMCP server (server.py) + | @mcp.tool() decorated async function + v +tools/*.py + | validate inputs (validators.py / sanitizers.py) + | check_protected() / validate_node() / is_dry_run + v +ProxmoxClient.api_call() -- for API tools + | asyncio.to_thread(proxmoxer) + v +Proxmox VE REST API (HTTPS) + +SSHExecutor.execute() -- for disk tools + | asyncio.to_thread(paramiko) + v +Proxmox Node Shell (SSH) +``` + +### Key Patterns + +**Deferred imports** — Every tool module uses `get_client()` / `get_mcp()` helpers to break circular imports between `server.py` and tool modules. This is correct and intentional. + +**Async wrapping** — Both `proxmoxer` (sync) and `paramiko` (sync) are wrapped via `asyncio.to_thread()` to maintain the async-first design. + +**Safety guards in client** — Three safety mechanisms live in `ProxmoxClient`, not in tools: +- `check_protected(vmid)` — prevents operations on protected VMIDs +- `validate_node(node)` — enforces node allowlist +- `dry_run_response()` — short-circuits all writes when dry-run mode is enabled + +**Confirmation gates** — Destructive disk operations require `confirm_destructive=True` to proceed. Without it, they return a preview of what would happen. + +### Architecture Issues + +#### Issue 1: `_resolve_node` Duplicated 4x + +An identical private helper `_resolve_node(client, vmid, node)` is copy-pasted in: +- `tools/vm.py:27` +- `tools/container.py:27` +- `tools/backup.py:26` +- `tools/network.py:25` + +**Recommendation:** Extract to `ProxmoxClient` as a method or to a shared utility. + +#### Issue 2: `vm_type` Not Validated in backup.py + +The `vm_type: str = "qemu"` parameter in backup tools is never validated. Passing `vm_type="invalid"` silently routes to the LXC API path via else-branch logic. + +**Recommendation:** Validate `vm_type in {"qemu", "lxc"}` at the top of each backup tool. 
+ +#### Issue 3: SSH Routes to Single Host Only + +`ssh.py:106` always connects to `self.config.PROXMOX_HOST` regardless of the `node` parameter. In multi-node clusters, disk operations would always execute on the API host, not the target node. + +**Recommendation:** Support node-to-IP mapping or DNS-based resolution for multi-node setups. + +#### Issue 4: Dead Config — `PROXMOX_MAX_CONCURRENT_TASKS` + +`config.py:27` defines `PROXMOX_MAX_CONCURRENT_TASKS: int = 5` but it's never used anywhere in the codebase. Zero references outside config and test files. + +**Recommendation:** Either implement concurrency limiting in `api_call()` or remove the dead config. + +--- + +## 5. Security Issues + +### Critical + +#### S1: `paramiko.AutoAddPolicy()` — MITM Vulnerability +**File:** `ssh.py:40` + +Silently accepts any SSH host key, making all SSH connections vulnerable to man-in-the-middle attacks. Particularly severe since the SSH user defaults to `root`. + +**Recommendation:** Use `RejectPolicy` with a configurable `known_hosts` file. Add a `PROXMOX_SSH_KNOWN_HOSTS` config field. + +#### S2: `extra_config` Blocklist Too Narrow +**Files:** `vm.py:522-524`, `container.py:432-434` + +Only blocks `{"vmid", "node", "digest"}`. Allows dangerous keys like `hookscript` (arbitrary script execution), `lock`, `cdrom`, `net0`, `scsi0` to pass through unchecked. + +**Recommendation:** Switch to an allowlist of safe modifiable keys, or add a comprehensive blocklist including `hookscript`, `lock`, `serial*`, `usb*`, `hostpci*`. + +### High + +#### S3: SSH Command — UUID Not Quote-Sanitized +**File:** `disk.py` (create_mount_point fstab line construction) + +The `device_uuid` from `blkid` output is interpolated into a shell `echo` command. While `blkid` output is trusted, a compromised filesystem could theoretically produce a UUID containing shell metacharacters. + +**Recommendation:** Quote-escape UUID values or use `tee` with a heredoc instead of `echo`. 
+ +#### S4: `start_vm` Missing `check_protected()` +**File:** `vm.py:124` + +`stop_vm` and `delete_vm` check protected status, but `start_vm` does not. Inconsistent safety gate coverage. + +### Medium + +#### S5: `PROXMOX_VERIFY_SSL` Defaults to `False` +**File:** `config.py:13` + +SSL verification disabled by default. Appropriate for homelab self-signed certs but poor for production. + +#### S6: SSH Password Fallback Undocumented +**File:** `ssh.py:50` + +`PROXMOX_SSH_PASSWORD` falls back to `PROXMOX_PASSWORD` silently. This implicit coupling between API and SSH credentials is not documented in `.env.example`. + +#### S7: `snapname` Not Validated +**File:** `backup.py:35` + +Snapshot names are passed directly to the API with no format validation. Proxmox requires `[a-zA-Z][a-zA-Z0-9_\-\.]*`. + +--- + +## 6. Test Coverage Analysis + +### Test Inventory + +| Test File | Count | Module Covered | +|-----------|-------|---------------| +| `test_disk_tools.py` | 35 | disk.py, add_storage, remove_storage | +| `test_sanitizers.py` | 32 | sanitizers.py | +| `test_vm_tools.py` | 16 | vm.py | +| `test_task_tools.py` | 15 | task.py | +| `test_storage_tools.py` | 12 | storage.py (list/get only) | +| `test_cluster_tools.py` | 12 | cluster.py | +| `test_node_tools.py` | 11 | node.py | +| `test_ssh.py` | 9 | ssh.py | +| `test_client.py` | 8 | client.py | +| `test_container_tools.py` | 6 | container.py (partial) | +| `test_config.py` | 6 | config.py | +| `test_backup_tools.py` | 5 | backup.py (partial) | +| `test_formatters.py` | 5 | formatters.py | +| `test_validators.py` | 4 | validators.py | +| `test_errors.py` | 3 | errors.py | +| `test_integration.py` | 3 | Stubs only (`@pytest.mark.skip`) | +| **Total** | **182** | | + +### Modules With Zero Tests + +| Module | Tools/Items | Impact | +|--------|------------|--------| +| `network.py` | 3 tools | No `test_network_tools.py` exists | +| `resources/resources.py` | 10 resources | No `test_resources.py` exists | +| 
`prompts/prompts.py` | 6 prompts | No `test_prompts.py` exists | + +### Partial Coverage + +| Module | Tested | Total | Missing | +|--------|--------|-------|---------| +| `container.py` | 6 of 12 | 50% | `get_container_config`, `shutdown_container`, `reboot_container`, `clone_container`, `migrate_container`, `modify_container_config` | +| `backup.py` | 5 of 7 | 71% | `delete_snapshot`, `restore_backup` | + +### Test Architecture Note + +The `add_storage` and `remove_storage` tests are in `test_disk_tools.py` instead of `test_storage_tools.py`. This is a file placement issue — they test `storage.py` functions. + +--- + +## 7. Code Quality Issues + +### Q1: Logging Inconsistency + +~36 logger calls in `cluster.py`, `node.py`, `task.py`, `storage.py` use f-strings (eager evaluation): +```python +logger.error(f"Failed to list storage: {e}") # BAD: always evaluates +``` + +Newer modules (`vm.py`, `container.py`, `backup.py`, `disk.py`) correctly use `%` formatting: +```python +logger.info("Starting VM %d on %s", vmid, node) # GOOD: lazy evaluation +``` + +**Recommendation:** Standardize on `%` formatting. Enable ruff rule `G` (flake8-logging-format) to enforce. + +### Q2: Ruff Configuration Minimal + +`pyproject.toml` only configures `target-version` and `line-length`. No rule selection, no ignores, no per-file overrides. Missing recommended rules: +- `G` — flake8-logging-format (catches f-string logging) +- `B` — flake8-bugbear (common bugs) +- `I` — isort (import ordering) +- `UP` — pyupgrade (Python version upgrades) +- `SIM` — simplify (code simplification) + +### Q3: No mypy Configuration + +No `[tool.mypy]` section in `pyproject.toml`. Running `mypy` uses default (lenient) settings. Missing: `--strict`, `--ignore-missing-imports`, `--no-implicit-optional`. 
+ +### Q4: Missing Type Annotation on `api_call` + +`client.py:55` — `func` parameter and return type are both untyped: +```python +async def api_call(self, func, *args, **kwargs): # Should type func and return +``` + +### Q5: Missing `__all__` in Tool Modules + +No tool module exports `__all__`, making the public API surface unclear for tooling and documentation. + +--- + +## 8. Configuration Gaps + +### SSH Fields Missing from `.env.example` + +The following config fields exist in `config.py` but are absent from `.env.example`: + +```bash +PROXMOX_SSH_USER=root # SSH username (default: root) +PROXMOX_SSH_PORT=22 # SSH port (default: 22) +PROXMOX_SSH_KEY_PATH= # Path to SSH private key +PROXMOX_SSH_PASSWORD= # SSH password (falls back to PROXMOX_PASSWORD) +``` + +Users of disk management tools will get `SSHExecutionError` with no config guidance. + +### No API Timeout Configuration + +`client.py:34` hardcodes `"timeout": 30`. No environment variable to override for slow networks or large operations. + +### Dead Config Field + +`PROXMOX_MAX_CONCURRENT_TASKS` is defined and documented in `.env.example` but never used. + +--- + +## 9. Developer Experience + +### README — Nearly Empty + +`README.md` contains only: +```markdown +# ProxmoxMCP +``` + +Missing: +- Installation instructions +- Configuration guide +- Tool listing / capabilities overview +- Claude Desktop integration setup +- Usage examples +- Prerequisites (Python 3.11+, Proxmox VE) +- Contributing guidelines + +### No CI/CD + +No `.github/` directory exists. No GitHub Actions, no automated testing, no lint enforcement, no release automation. + +### No Docker Support + +No `Dockerfile` or `docker-compose.yml`. Server only runs via direct Python installation. + +### docs/ Contains Only Internal Plans + +`docs/plans/` has implementation plans but no end-user documentation. + +--- + +## 10. 
Missing Proxmox API Coverage + +### Tier 1 — High Value, Low-Medium Complexity (API-only) + +These extend existing patterns and don't require SSH. + +| Feature | Proposed Tools | API Endpoint | Complexity | +|---------|---------------|--------------|------------| +| **VM Disk Resize** | `resize_vm_disk` | `PUT /nodes/{node}/qemu/{vmid}/resize` | Low | +| **Cloud-Init Config** | `set_cloudinit_config`, `get_cloudinit_config` | `PUT /nodes/{node}/qemu/{vmid}/config` | Medium | +| **Node Power Mgmt** | `reboot_node`, `shutdown_node` | `POST /nodes/{node}/status` | Low | +| **Template Conversion** | `convert_vm_to_template` | `POST /nodes/{node}/qemu/{vmid}/template` | Low | +| **ISO/Template Download** | `download_to_storage` | `POST /nodes/{node}/storage/{storage}/download-url` | Low | +| **Pool Write Ops** | `create_pool`, `modify_pool`, `delete_pool` | `POST/PUT/DELETE /pools` | Low | +| **Firewall Write Ops** | `add_firewall_rule`, `delete_firewall_rule`, `set_firewall_options` | `POST/DELETE /nodes/{node}/firewall/rules` | Medium | + +### Tier 2 — Medium Value, Medium Complexity + +| Feature | Proposed Tools | API Endpoint | Complexity | +|---------|---------------|--------------|------------| +| **User Management** | `list_users`, `create_user`, `delete_user`, `set_permissions` | `/access/users`, `/access/acl` | Medium | +| **Backup Scheduling** | `create_backup_job`, `list_backup_jobs`, `delete_backup_job` | `GET/POST/DELETE /cluster/backup` | Medium | +| **HA Management** | `add_ha_resource`, `remove_ha_resource`, `get_ha_status` | `/cluster/ha/resources`, `/cluster/ha/status` | Medium | +| **Certificate/ACME** | `setup_acme`, `order_certificate`, `list_certificates` | `/nodes/{node}/certificates` | Medium | +| **Storage Upload** | `upload_to_storage` | `POST /nodes/{node}/storage/{storage}/upload` | Medium | + +### Tier 3 — Advanced Features, Higher Complexity + +| Feature | Proposed Tools | Complexity | +|---------|---------------|------------| +| **Ceph 
Management** | OSD create/remove, pool management, monitor status | High | +| **SDN (Software-Defined Networking)** | VNet, zone, VLAN management | High | +| **Multi-node SSH** | Node-aware SSH routing for disk tools | Medium | +| **Console Proxy** | VNC/SPICE proxy token generation | Medium | + +### Tier 4 — Quality, Testing & DevEx + +| Item | Impact | Effort | +|------|--------|--------| +| Fix `_resolve_node` duplication | Code quality | Low | +| Add network tool tests (~10 tests) | Coverage | Low | +| Add resource tests (~15 tests) | Coverage | Low | +| Add prompt tests (~8 tests) | Coverage | Low | +| Complete container tool tests (+8 tests) | Coverage | Low | +| Complete backup tool tests (+3 tests) | Coverage | Low | +| Fix logging to use `%` formatting | Consistency | Low | +| Expand ruff rules (G, B, I, UP, SIM) | Code quality | Low | +| Add mypy configuration | Type safety | Low | +| Fix security issues (S1-S7) | Security | Medium | +| Write proper README | Usability | Medium | +| Add GitHub Actions CI | Reliability | Medium | +| Add `.env.example` SSH section | Documentation | Low | +| Add Dockerfile | Deployment | Low | +| Update CLAUDE.md tool count | Accuracy | Trivial | + +--- + +## 11. Implementation Roadmap + +### Phase 1: Stabilize (fix existing issues) + +**Priority: Immediate** + +1. Fix security issues S1 (SSH host key), S2 (extra_config allowlist), S4 (start_vm protected check) +2. Add missing SSH config to `.env.example` +3. Fix `_resolve_node` duplication — extract to shared utility +4. Standardize logging to `%` formatting +5. Add `vm_type` validation in backup tools +6. Update CLAUDE.md tool count to 65 +7. Remove or implement `PROXMOX_MAX_CONCURRENT_TASKS` + +### Phase 2: Test Coverage + +**Priority: High** + +1. Add `test_network_tools.py` (~10 tests) +2. Add `test_resources.py` (~15 tests) +3. Add `test_prompts.py` (~8 tests) +4. Complete `test_container_tools.py` (+8 tests for missing tools) +5. 
Complete `test_backup_tools.py` (+3 tests for delete_snapshot, restore_backup) +6. Move add/remove_storage tests to `test_storage_tools.py` +7. Expand ruff rules and fix findings + +### Phase 3: High-Value API Tools + +**Priority: High** + +1. `resize_vm_disk` — most requested missing feature +2. `convert_vm_to_template` — common workflow +3. `reboot_node` / `shutdown_node` — node management essentials +4. `download_to_storage` — ISO/template download +5. `create_pool` / `modify_pool` / `delete_pool` — pool management + +### Phase 4: Infrastructure Management + +**Priority: Medium** + +1. Firewall write operations +2. Cloud-init configuration tools +3. User and permission management +4. Backup job scheduling +5. HA resource management + +### Phase 5: DevEx & Advanced + +**Priority: Lower** + +1. Write comprehensive README with examples +2. Add GitHub Actions CI/CD pipeline +3. Add Dockerfile for containerized deployment +4. Ceph management (if applicable to user's setup) +5. SDN management (if applicable) +6. Multi-node SSH support + +--- + +## Summary + +ProxmoxMCP is a well-architected MCP server with solid foundations (async design, safety guards, input sanitization). The recent disk management addition proves the architecture extends cleanly to SSH-based operations. + +**Key numbers:** +- 65 tools, 10 resources, 6 prompts +- 182 tests across 16 test files +- 3 modules with zero test coverage +- 7 security issues identified (1 critical, 1 high, 5 medium) +- ~25 additional Proxmox API features that could be implemented +- README, CI/CD, and Docker support are absent + +The project has a strong core. The recommended path is: stabilize (security fixes, consistency) → test coverage → new API tools → DevEx improvements. 
diff --git a/pyproject.toml b/pyproject.toml index b77db5b..7efc7ab 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,3 +40,25 @@ markers = ["integration: marks tests requiring a live Proxmox instance"] [tool.ruff] target-version = "py311" line-length = 100 + +[tool.ruff.lint] +select = [ + "E", # pycodestyle errors + "W", # pycodestyle warnings + "F", # pyflakes + "I", # isort + "B", # flake8-bugbear + "UP", # pyupgrade + "RUF", # ruff-specific rules +] + +[tool.ruff.lint.isort] +known-first-party = ["proxmox_mcp"] + +[tool.mypy] +python_version = "3.11" +warn_return_any = true +warn_unused_configs = true +disallow_untyped_defs = false +check_untyped_defs = true +ignore_missing_imports = true diff --git a/src/proxmox_mcp/__main__.py b/src/proxmox_mcp/__main__.py index 4310de7..da17a87 100644 --- a/src/proxmox_mcp/__main__.py +++ b/src/proxmox_mcp/__main__.py @@ -1,4 +1,5 @@ """Allow running as python -m proxmox_mcp.""" + from proxmox_mcp.server import main main() diff --git a/src/proxmox_mcp/client.py b/src/proxmox_mcp/client.py index 0f68cfc..ed68184 100644 --- a/src/proxmox_mcp/client.py +++ b/src/proxmox_mcp/client.py @@ -2,16 +2,18 @@ import asyncio import logging + from proxmoxer import ProxmoxAPI from proxmox_mcp.config import ProxmoxConfig from proxmox_mcp.utils.errors import ( - ProxmoxConnectionError, AuthenticationError, - VMNotFoundError, - ProtectedResourceError, NodeNotAllowedError, + ProtectedResourceError, + ProxmoxConnectionError, + VMNotFoundError, ) +from proxmox_mcp.utils.validators import validate_node_name logger = logging.getLogger("proxmox-mcp") @@ -34,14 +36,24 @@ def _connect(config: ProxmoxConfig) -> ProxmoxAPI: "timeout": 30, } if config.PROXMOX_TOKEN_NAME and config.PROXMOX_TOKEN_VALUE: - kwargs["token_name"] = config.PROXMOX_TOKEN_NAME + # proxmoxer expects user and token_name as separate params. + # Support both "user@realm!tokenid" and plain "tokenid" formats. + token_name = config.PROXMOX_TOKEN_NAME + if "!" 
in token_name: + user, token_id = token_name.split("!", 1) + kwargs["user"] = user + kwargs["token_name"] = token_id + else: + kwargs["user"] = config.PROXMOX_USER or "" + kwargs["token_name"] = token_name kwargs["token_value"] = config.PROXMOX_TOKEN_VALUE elif config.PROXMOX_USER and config.PROXMOX_PASSWORD: kwargs["user"] = config.PROXMOX_USER kwargs["password"] = config.PROXMOX_PASSWORD else: raise AuthenticationError( - "No authentication configured. Set PROXMOX_TOKEN_NAME/VALUE or PROXMOX_USER/PASSWORD." + "No authentication configured. " + "Set PROXMOX_TOKEN_NAME/VALUE or PROXMOX_USER/PASSWORD." ) try: return ProxmoxAPI(**kwargs) @@ -101,9 +113,23 @@ def dry_run_response(self, action: str, **params) -> dict: "status": "dry_run", "action": action, "params": params, - "message": "DRY RUN: This action was NOT executed. Set PROXMOX_DRY_RUN=false to perform.", + "message": ( + "DRY RUN: This action was NOT executed. " + "Set PROXMOX_DRY_RUN=false to perform." + ), } + async def resolve_node(self, vmid: int, node: str | None) -> str: + """Resolve and validate a node for a VMID. + + If node is provided, validate it. Otherwise auto-detect from cluster resources. 
+ """ + if node: + validate_node_name(node) + self.validate_node(node) + return node + return await self.resolve_node_for_vmid(vmid) + @property def is_dry_run(self) -> bool: return self.config.PROXMOX_DRY_RUN diff --git a/src/proxmox_mcp/config.py b/src/proxmox_mcp/config.py index f23f863..6e79336 100644 --- a/src/proxmox_mcp/config.py +++ b/src/proxmox_mcp/config.py @@ -24,13 +24,14 @@ class ProxmoxConfig(BaseSettings): PROXMOX_DRY_RUN: bool = False PROXMOX_ALLOWED_NODES: str = "" PROXMOX_PROTECTED_VMIDS: str = "" - PROXMOX_MAX_CONCURRENT_TASKS: int = 5 # SSH (for disk management tools) PROXMOX_SSH_USER: str = "root" PROXMOX_SSH_PORT: int = 22 PROXMOX_SSH_KEY_PATH: str | None = None PROXMOX_SSH_PASSWORD: str | None = None + PROXMOX_SSH_KNOWN_HOSTS: str = "" + PROXMOX_SSH_HOST_KEY_CHECKING: bool = True # Server MCP_TRANSPORT: str = "stdio" diff --git a/src/proxmox_mcp/prompts/prompts.py b/src/proxmox_mcp/prompts/prompts.py index e01e7ad..3844086 100644 --- a/src/proxmox_mcp/prompts/prompts.py +++ b/src/proxmox_mcp/prompts/prompts.py @@ -4,6 +4,7 @@ def get_mcp(): from proxmox_mcp.server import mcp + return mcp diff --git a/src/proxmox_mcp/resources/resources.py b/src/proxmox_mcp/resources/resources.py index 07fa2ae..f769fdc 100644 --- a/src/proxmox_mcp/resources/resources.py +++ b/src/proxmox_mcp/resources/resources.py @@ -3,18 +3,21 @@ import json import logging -from proxmox_mcp.utils.formatters import format_vm_summary, format_container_summary + +from proxmox_mcp.utils.formatters import format_container_summary, format_vm_summary logger = logging.getLogger("proxmox-mcp") def get_client(): from proxmox_mcp.server import proxmox_client + return proxmox_client def get_mcp(): from proxmox_mcp.server import mcp + return mcp diff --git a/src/proxmox_mcp/server.py b/src/proxmox_mcp/server.py index 88adea6..e2207f8 100644 --- a/src/proxmox_mcp/server.py +++ b/src/proxmox_mcp/server.py @@ -1,10 +1,12 @@ """FastMCP server definition and entry point for Proxmox VE 
Manager.""" import logging + from mcp.server.fastmcp import FastMCP -from proxmox_mcp.config import ProxmoxConfig from proxmox_mcp.client import ProxmoxClient +from proxmox_mcp.config import ProxmoxConfig +from proxmox_mcp.ssh import SSHExecutor # Initialize config and logging config = ProxmoxConfig() @@ -27,15 +29,24 @@ ), ) -# Initialize Proxmox client +# Initialize Proxmox client and SSH executor proxmox_client = ProxmoxClient(config) +ssh_executor = SSHExecutor(config) # Import tool modules to register them with mcp -from proxmox_mcp.tools import cluster, node, storage, task # noqa: E402, F401 -from proxmox_mcp.tools import vm, container # noqa: E402, F401 -from proxmox_mcp.tools import backup, network # noqa: E402, F401 -from proxmox_mcp.resources import resources # noqa: E402, F401 from proxmox_mcp.prompts import prompts # noqa: E402, F401 +from proxmox_mcp.resources import resources # noqa: E402, F401 +from proxmox_mcp.tools import ( # noqa: E402, F401 + backup, + cluster, + container, + disk, + network, + node, + storage, + task, + vm, +) def main(): diff --git a/src/proxmox_mcp/ssh.py b/src/proxmox_mcp/ssh.py new file mode 100644 index 0000000..224f82c --- /dev/null +++ b/src/proxmox_mcp/ssh.py @@ -0,0 +1,136 @@ +"""SSH execution layer for running commands on Proxmox nodes.""" + +import asyncio +import logging +import os +from dataclasses import dataclass +from pathlib import Path + +import paramiko + +from proxmox_mcp.config import ProxmoxConfig +from proxmox_mcp.utils.errors import SSHExecutionError + +logger = logging.getLogger("proxmox-mcp") + +MAX_SSH_TIMEOUT = 120 + + +@dataclass +class SSHResult: + """Result of an SSH command execution.""" + + exit_code: int + stdout: str + stderr: str + + @property + def success(self) -> bool: + return self.exit_code == 0 + + +class SSHExecutor: + """Execute commands on Proxmox nodes over SSH.""" + + def __init__(self, config: ProxmoxConfig) -> None: + self.config = config + + 
def _create_client(self, host: str) -> paramiko.SSHClient: + """Create and configure a paramiko SSH client.""" + client = paramiko.SSHClient() + + if self.config.PROXMOX_SSH_HOST_KEY_CHECKING: + known_hosts = ( + self.config.PROXMOX_SSH_KNOWN_HOSTS + or os.path.expanduser("~/.ssh/known_hosts") + ) + if os.path.exists(known_hosts): + client.load_host_keys(known_hosts) + client.set_missing_host_key_policy(paramiko.RejectPolicy()) + else: + logger.warning("SSH host key checking disabled — vulnerable to MITM attacks") + client.set_missing_host_key_policy(paramiko.WarningPolicy()) + + connect_kwargs: dict = { + "hostname": host, + "port": self.config.PROXMOX_SSH_PORT, + "username": self.config.PROXMOX_SSH_USER, + "timeout": 10, + } + + # Resolve SSH password: dedicated SSH password, or fall back to Proxmox API password + ssh_password = self.config.PROXMOX_SSH_PASSWORD or self.config.PROXMOX_PASSWORD + + if self.config.PROXMOX_SSH_KEY_PATH: + key_path = Path(self.config.PROXMOX_SSH_KEY_PATH).expanduser() + if not key_path.exists(): + raise SSHExecutionError(f"SSH key not found: {key_path}") + connect_kwargs["key_filename"] = str(key_path) + elif ssh_password: + connect_kwargs["password"] = ssh_password + # Skip default key discovery when password is explicitly provided + connect_kwargs["look_for_keys"] = False + connect_kwargs["allow_agent"] = False + # Fallback: paramiko will try default keys (~/.ssh/id_rsa, etc.) 
+ + try: + client.connect(**connect_kwargs) + except Exception as e: + client.close() + raise SSHExecutionError(f"SSH connection to {host} failed: {e}") from e + + return client + + def _execute_sync(self, host: str, command: str, timeout: int = 30) -> SSHResult: + """Execute a command over SSH synchronously.""" + timeout = min(timeout, MAX_SSH_TIMEOUT) + client = self._create_client(host) + try: + logger.debug("SSH %s: %s", host, command) + _, stdout, stderr = client.exec_command(command, timeout=timeout) + exit_code = stdout.channel.recv_exit_status() + stdout_str = stdout.read().decode("utf-8", errors="replace") + stderr_str = stderr.read().decode("utf-8", errors="replace") + return SSHResult( + exit_code=exit_code, + stdout=stdout_str, + stderr=stderr_str, + ) + except Exception as e: + raise SSHExecutionError(f"SSH command failed on {host}: {e}") from e + finally: + client.close() + + async def execute(self, node: str, command: str, timeout: int = 30) -> SSHResult: + """Execute a command on a Proxmox node asynchronously. + + All commands are executed on PROXMOX_HOST regardless of the node + argument; per-node SSH routing for multi-node clusters is not + implemented yet (the node name is used for logging only). + + Args: + node: Proxmox node name (currently used for logging only). + command: Shell command to execute. + timeout: Command timeout in seconds (max 120). + """ + # NOTE: always connects to the API host. Per-node routing is a known + # limitation; the node argument only appears in log messages. + host = self.config.PROXMOX_HOST + + # Ensure sbin dirs are in PATH for system tools (parted, mkfs, etc.) 
+ full_command = ( + "export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$PATH; " + + command + ) + + logger.info("SSH executing on %s (%s): %s", node, host, command) + result = await asyncio.to_thread(self._execute_sync, host, full_command, timeout) + + if result.exit_code != 0: + logger.warning( + "SSH command exited %d on %s: stderr=%s", + result.exit_code, + node, + result.stderr[:200], + ) + return result diff --git a/src/proxmox_mcp/tools/backup.py b/src/proxmox_mcp/tools/backup.py index 29e9f33..0ebd125 100644 --- a/src/proxmox_mcp/tools/backup.py +++ b/src/proxmox_mcp/tools/backup.py @@ -1,38 +1,46 @@ """Backup and snapshot management tools.""" import logging -from proxmox_mcp.utils.errors import format_error_response -from proxmox_mcp.utils.validators import validate_vmid, validate_node_name + +from proxmox_mcp.utils.errors import InvalidParameterError, format_error_response from proxmox_mcp.utils.formatters import format_task_result +from proxmox_mcp.utils.sanitizers import validate_snapname +from proxmox_mcp.utils.validators import validate_node_name, validate_vmid logger = logging.getLogger("proxmox-mcp") def get_client(): from proxmox_mcp.server import proxmox_client + return proxmox_client def get_mcp(): from proxmox_mcp.server import mcp + return mcp mcp = get_mcp() +VALID_VM_TYPES = frozenset({"qemu", "lxc"}) -async def _resolve_node(client, vmid: int, node: str | None) -> str: - if node: - validate_node_name(node) - client.validate_node(node) - return node - return await client.resolve_node_for_vmid(vmid) + +def _validate_vm_type(vm_type: str) -> None: + if vm_type not in VALID_VM_TYPES: + raise InvalidParameterError( + f"vm_type must be 'qemu' or 'lxc', got '{vm_type}'." 
+ ) @mcp.tool() async def create_snapshot( - vmid: int, snapname: str, node: str | None = None, - description: str | None = None, include_vmstate: bool = False, + vmid: int, + snapname: str, + node: str | None = None, + description: str | None = None, + include_vmstate: bool = False, vm_type: str = "qemu", ) -> dict: """Create a snapshot of a VM or container. @@ -48,7 +56,9 @@ async def create_snapshot( try: client = get_client() validate_vmid(vmid) - node = await _resolve_node(client, vmid, node) + validate_snapname(snapname) + _validate_vm_type(vm_type) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("create_snapshot", vmid=vmid, snapname=snapname) kwargs = {"snapname": snapname} @@ -56,7 +66,11 @@ async def create_snapshot( kwargs["description"] = description if include_vmstate and vm_type == "qemu": kwargs["vmstate"] = 1 - api_path = client.api.nodes(node).qemu(vmid) if vm_type == "qemu" else client.api.nodes(node).lxc(vmid) + api_path = ( + client.api.nodes(node).qemu(vmid) + if vm_type == "qemu" + else client.api.nodes(node).lxc(vmid) + ) logger.info("Creating snapshot '%s' for %s %d", snapname, vm_type, vmid) upid = await client.api_call(api_path.snapshot.post, **kwargs) return format_task_result({"data": upid}) @@ -76,8 +90,13 @@ async def list_snapshots(vmid: int, node: str | None = None, vm_type: str = "qem try: client = get_client() validate_vmid(vmid) - node = await _resolve_node(client, vmid, node) - api_path = client.api.nodes(node).qemu(vmid) if vm_type == "qemu" else client.api.nodes(node).lxc(vmid) + _validate_vm_type(vm_type) + node = await client.resolve_node(vmid, node) + api_path = ( + client.api.nodes(node).qemu(vmid) + if vm_type == "qemu" + else client.api.nodes(node).lxc(vmid) + ) data = await client.api_call(api_path.snapshot.get) return {"status": "success", "vmid": vmid, "node": node, "snapshots": data} except Exception as e: @@ -86,8 +105,11 @@ async def list_snapshots(vmid: int, node: 
str | None = None, vm_type: str = "qem @mcp.tool() async def rollback_snapshot( - vmid: int, snapname: str, node: str | None = None, - vm_type: str = "qemu", confirm: bool = False, + vmid: int, + snapname: str, + node: str | None = None, + vm_type: str = "qemu", + confirm: bool = False, ) -> dict: """Rollback a VM/CT to a snapshot. Set confirm=True to execute. @@ -101,17 +123,26 @@ async def rollback_snapshot( try: client = get_client() validate_vmid(vmid) + validate_snapname(snapname) + _validate_vm_type(vm_type) client.check_protected(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if not confirm: return { "status": "confirmation_required", - "warning": f"This will rollback {vm_type} {vmid} to snapshot '{snapname}'. Current state will be lost.", + "warning": ( + f"This will rollback {vm_type} {vmid} to snapshot '{snapname}'. " + f"Current state will be lost." + ), "action": "Call rollback_snapshot again with confirm=True to proceed.", } if client.is_dry_run: return client.dry_run_response("rollback_snapshot", vmid=vmid, snapname=snapname) - api_path = client.api.nodes(node).qemu(vmid) if vm_type == "qemu" else client.api.nodes(node).lxc(vmid) + api_path = ( + client.api.nodes(node).qemu(vmid) + if vm_type == "qemu" + else client.api.nodes(node).lxc(vmid) + ) logger.warning("Rolling back %s %d to snapshot '%s'", vm_type, vmid, snapname) upid = await client.api_call(api_path.snapshot(snapname).rollback.post) return format_task_result({"data": upid}) @@ -121,8 +152,11 @@ async def rollback_snapshot( @mcp.tool() async def delete_snapshot( - vmid: int, snapname: str, node: str | None = None, - vm_type: str = "qemu", confirm: bool = False, + vmid: int, + snapname: str, + node: str | None = None, + vm_type: str = "qemu", + confirm: bool = False, ) -> dict: """Delete a snapshot. Set confirm=True to execute. 
@@ -136,7 +170,9 @@ async def delete_snapshot( try: client = get_client() validate_vmid(vmid) - node = await _resolve_node(client, vmid, node) + validate_snapname(snapname) + _validate_vm_type(vm_type) + node = await client.resolve_node(vmid, node) if not confirm: return { "status": "confirmation_required", @@ -145,7 +181,11 @@ async def delete_snapshot( } if client.is_dry_run: return client.dry_run_response("delete_snapshot", vmid=vmid, snapname=snapname) - api_path = client.api.nodes(node).qemu(vmid) if vm_type == "qemu" else client.api.nodes(node).lxc(vmid) + api_path = ( + client.api.nodes(node).qemu(vmid) + if vm_type == "qemu" + else client.api.nodes(node).lxc(vmid) + ) logger.warning("Deleting snapshot '%s' from %s %d", snapname, vm_type, vmid) upid = await client.api_call(api_path.snapshot(snapname).delete) return format_task_result({"data": upid}) @@ -155,8 +195,12 @@ async def delete_snapshot( @mcp.tool() async def create_backup( - vmid: int, node: str | None = None, storage: str = "local", - mode: str = "snapshot", compress: str = "zstd", notes: str | None = None, + vmid: int, + node: str | None = None, + storage: str = "local", + mode: str = "snapshot", + compress: str = "zstd", + notes: str | None = None, ) -> dict: """Initiate a vzdump backup of a VM or container. 
@@ -171,7 +215,7 @@ async def create_backup( try: client = get_client() validate_vmid(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("create_backup", vmid=vmid, storage=storage) kwargs = { @@ -203,21 +247,34 @@ async def list_backups(node: str, storage: str = "local", vmid: int | None = Non validate_node_name(node) client.validate_node(node) kwargs = {"content": "backup"} - data = await client.api_call( - client.api.nodes(node).storage(storage).content.get, **kwargs - ) + data = await client.api_call(client.api.nodes(node).storage(storage).content.get, **kwargs) if vmid is not None: vmid_str = str(vmid) - data = [b for b in data if f"-{vmid_str}-" in b.get("volid", "") or b.get("volid", "").endswith(f"-{vmid_str}")] - return {"status": "success", "node": node, "storage": storage, "backups": data, "total": len(data)} + data = [ + b + for b in data + if f"-{vmid_str}-" in b.get("volid", "") + or b.get("volid", "").endswith(f"-{vmid_str}") + ] + return { + "status": "success", + "node": node, + "storage": storage, + "backups": data, + "total": len(data), + } except Exception as e: return format_error_response(e) @mcp.tool() async def restore_backup( - node: str, storage: str, archive: str, vmid: int, - force: bool = False, confirm: bool = False, + node: str, + storage: str, + archive: str, + vmid: int, + force: bool = False, + confirm: bool = False, ) -> dict: """Restore a VM/CT from a backup archive. Set confirm=True to execute. @@ -258,3 +315,120 @@ async def restore_backup( return format_task_result({"data": upid}) except Exception as e: return format_error_response(e) + + +# --------------------------------------------------------------------------- +# Backup job scheduling +# --------------------------------------------------------------------------- + + +@mcp.tool() +async def list_backup_jobs() -> dict: + """List all scheduled backup jobs in the cluster. 
+ + Returns each job's ID, schedule, target storage, included VMs, and settings. + """ + try: + client = get_client() + logger.info("Listing scheduled backup jobs") + data = await client.api_call(client.api.cluster.backup.get) + return {"status": "success", "count": len(data), "jobs": data} + except Exception as e: + logger.error("Failed to list backup jobs: %s", e) + return format_error_response(e) + + +@mcp.tool() +async def create_backup_job( + storage: str, + schedule: str, + vmid: str | None = None, + all_vms: bool = False, + mode: str = "snapshot", + compress: str = "zstd", + mailnotification: str | None = None, + mailto: str | None = None, + enabled: bool = True, + comment: str | None = None, +) -> dict: + """Create a scheduled backup job. + + Args: + storage: Target storage for backups (e.g. 'local', 'nfs-backup'). + schedule: Cron-like schedule (e.g. '0 2 * * *' for daily at 2am). + vmid: Comma-separated VMIDs to back up (e.g. '100,101,102'). + all_vms: Back up all VMs/CTs (overrides vmid). + mode: Backup mode - 'snapshot', 'suspend', or 'stop' (default 'snapshot'). + compress: Compression - 'zstd', 'lzo', 'gzip', or 'none' (default 'zstd'). + mailnotification: Email notification - 'always' or 'failure'. + mailto: Comma-separated email addresses for notifications. + enabled: Enable the job (default True). + comment: Job description/comment. 
+ """ + try: + client = get_client() + if client.is_dry_run: + return client.dry_run_response( + "create_backup_job", storage=storage, schedule=schedule + ) + kwargs: dict = { + "storage": storage, + "schedule": schedule, + "mode": mode, + "compress": compress, + "enabled": 1 if enabled else 0, + } + if all_vms: + kwargs["all"] = 1 + elif vmid: + kwargs["vmid"] = vmid + if mailnotification: + kwargs["mailnotification"] = mailnotification + if mailto: + kwargs["mailto"] = mailto + if comment: + kwargs["comment"] = comment + logger.info("Creating backup job: storage=%s, schedule=%s", storage, schedule) + await client.api_call(client.api.cluster.backup.post, **kwargs) + return { + "status": "success", + "message": "Backup job created successfully.", + "storage": storage, + "schedule": schedule, + } + except Exception as e: + logger.error("Failed to create backup job: %s", e) + return format_error_response(e) + + +@mcp.tool() +async def delete_backup_job( + job_id: str, + confirm: bool = False, +) -> dict: + """Delete a scheduled backup job. Set confirm=True to execute. + + Args: + job_id: The backup job ID (from list_backup_jobs). + confirm: Must be True to execute. 
+ """ + try: + client = get_client() + if not confirm: + return { + "status": "confirmation_required", + "warning": f"This will delete backup job '{job_id}'.", + "action": "Call delete_backup_job with confirm=True to proceed.", + } + if client.is_dry_run: + return client.dry_run_response("delete_backup_job", job_id=job_id) + logger.warning("Deleting backup job '%s'", job_id) + await client.api_call(client.api.cluster.backup(job_id).delete) + return { + "status": "success", + "job_id": job_id, + "message": f"Backup job '{job_id}' deleted.", + } + except Exception as e: + logger.error("Failed to delete backup job '%s': %s", job_id, e) + return format_error_response(e) diff --git a/src/proxmox_mcp/tools/cluster.py b/src/proxmox_mcp/tools/cluster.py index d3af3ac..e190da4 100644 --- a/src/proxmox_mcp/tools/cluster.py +++ b/src/proxmox_mcp/tools/cluster.py @@ -1,6 +1,7 @@ -"""Cluster-level read-only tools for Proxmox VE.""" +"""Cluster-level tools for Proxmox VE.""" import logging + from proxmox_mcp.utils.errors import format_error_response logger = logging.getLogger("proxmox-mcp") @@ -8,11 +9,13 @@ def get_client(): from proxmox_mcp.server import proxmox_client + return proxmox_client def get_mcp(): from proxmox_mcp.server import mcp + return mcp @@ -42,14 +45,16 @@ async def get_cluster_status() -> dict: "nodes": item.get("nodes"), } elif item.get("type") == "node": - nodes.append({ - "name": item.get("name"), - "id": item.get("id"), - "online": item.get("online"), - "ip": item.get("ip"), - "level": item.get("level", ""), - "local": item.get("local", 0), - }) + nodes.append( + { + "name": item.get("name"), + "id": item.get("id"), + "online": item.get("online"), + "ip": item.get("ip"), + "level": item.get("level", ""), + "local": item.get("local", 0), + } + ) return { "status": "success", @@ -57,7 +62,7 @@ async def get_cluster_status() -> dict: "nodes": nodes, } except Exception as e: - logger.error(f"Failed to get cluster status: {e}") + logger.error("Failed to get 
cluster status: %s", e) return format_error_response(e) @@ -71,7 +76,7 @@ async def get_cluster_resources(resource_type: str | None = None) -> dict: """ try: client = get_client() - logger.info(f"Fetching cluster resources (type={resource_type})") + logger.info("Fetching cluster resources (type=%s)", resource_type) kwargs = {} if resource_type: kwargs["type"] = resource_type @@ -84,7 +89,7 @@ async def get_cluster_resources(resource_type: str | None = None) -> dict: "resources": data, } except Exception as e: - logger.error(f"Failed to get cluster resources: {e}") + logger.error("Failed to get cluster resources: %s", e) return format_error_response(e) @@ -97,7 +102,7 @@ async def get_cluster_log(max_entries: int = 50) -> dict: """ try: client = get_client() - logger.info(f"Fetching cluster log (max_entries={max_entries})") + logger.info("Fetching cluster log (max_entries=%d)", max_entries) data = await client.api_call(client.api.cluster.log.get, max=max_entries) return { @@ -106,7 +111,7 @@ async def get_cluster_log(max_entries: int = 50) -> dict: "entries": data, } except Exception as e: - logger.error(f"Failed to get cluster log: {e}") + logger.error("Failed to get cluster log: %s", e) return format_error_response(e) @@ -126,7 +131,7 @@ async def get_next_vmid() -> dict: "vmid": int(data), } except Exception as e: - logger.error(f"Failed to get next VMID: {e}") + logger.error("Failed to get next VMID: %s", e) return format_error_response(e) @@ -147,5 +152,442 @@ async def list_pools() -> dict: "pools": data, } except Exception as e: - logger.error(f"Failed to list pools: {e}") + logger.error("Failed to list pools: %s", e) + return format_error_response(e) + + +@mcp.tool() +async def create_pool(poolid: str, comment: str | None = None) -> dict: + """Create a new resource pool. + + Args: + poolid: Unique pool identifier (e.g. 'production', 'dev-team'). + comment: Optional description for the pool. 
+ """ + try: + client = get_client() + if client.is_dry_run: + return client.dry_run_response("create_pool", poolid=poolid) + kwargs = {"poolid": poolid} + if comment: + kwargs["comment"] = comment + logger.info("Creating resource pool '%s'", poolid) + await client.api_call(client.api.pools.post, **kwargs) + return { + "status": "success", + "poolid": poolid, + "message": f"Pool '{poolid}' created successfully.", + } + except Exception as e: + logger.error("Failed to create pool '%s': %s", poolid, e) + return format_error_response(e) + + +@mcp.tool() +async def modify_pool( + poolid: str, + comment: str | None = None, + vms: str | None = None, + storage: str | None = None, + delete: bool = False, +) -> dict: + """Modify a resource pool — add/remove members or update comment. + + Args: + poolid: The pool to modify. + comment: New comment/description for the pool. + vms: Comma-separated VMIDs to add or remove (e.g. '100,101'). + storage: Comma-separated storage IDs to add or remove. + delete: If True, remove the specified vms/storage instead of adding. + """ + try: + client = get_client() + if client.is_dry_run: + return client.dry_run_response("modify_pool", poolid=poolid) + kwargs: dict = {} + if comment is not None: + kwargs["comment"] = comment + if vms: + kwargs["vms"] = vms + if storage: + kwargs["storage"] = storage + if delete: + kwargs["delete"] = 1 + if not kwargs: + return format_error_response( + Exception("No changes specified for pool modification.") + ) + logger.info("Modifying pool '%s': %s", poolid, list(kwargs.keys())) + await client.api_call(client.api.pools(poolid).put, **kwargs) + return { + "status": "success", + "poolid": poolid, + "changes": list(kwargs.keys()), + } + except Exception as e: + logger.error("Failed to modify pool '%s': %s", poolid, e) + return format_error_response(e) + + +@mcp.tool() +async def delete_pool(poolid: str, confirm: bool = False) -> dict: + """Delete a resource pool. Set confirm=True to execute. 
+ + The pool must be empty (no VMs or storage assigned). + + Args: + poolid: The pool to delete. + confirm: Must be True to execute. + """ + try: + client = get_client() + if not confirm: + return { + "status": "confirmation_required", + "warning": f"This will delete resource pool '{poolid}'.", + "action": "Call delete_pool again with confirm=True to proceed.", + } + if client.is_dry_run: + return client.dry_run_response("delete_pool", poolid=poolid) + logger.warning("Deleting resource pool '%s'", poolid) + await client.api_call(client.api.pools(poolid).delete) + return { + "status": "success", + "poolid": poolid, + "message": f"Pool '{poolid}' deleted successfully.", + } + except Exception as e: + logger.error("Failed to delete pool '%s': %s", poolid, e) + return format_error_response(e) + + +# --------------------------------------------------------------------------- +# User and permission management +# --------------------------------------------------------------------------- + + +@mcp.tool() +async def list_users() -> dict: + """List all users in the Proxmox cluster. + + Returns each user's ID, realm, email, enabled status, and groups. + """ + try: + client = get_client() + logger.info("Listing all users") + data = await client.api_call(client.api.access.users.get) + return {"status": "success", "count": len(data), "users": data} + except Exception as e: + logger.error("Failed to list users: %s", e) + return format_error_response(e) + + +@mcp.tool() +async def create_user( + userid: str, + password: str | None = None, + email: str | None = None, + firstname: str | None = None, + lastname: str | None = None, + groups: str | None = None, + comment: str | None = None, + enable: bool = True, +) -> dict: + """Create a new user. + + Args: + userid: User ID in 'user@realm' format (e.g. 'john@pve', 'admin@pam'). + password: Password (required for PVE realm, not for PAM/LDAP). + email: User email address. + firstname: First name. + lastname: Last name. 
+ groups: Comma-separated group names. + comment: User comment/description. + enable: Enable the user (default True). + """ + try: + client = get_client() + if client.is_dry_run: + return client.dry_run_response("create_user", userid=userid) + kwargs: dict = {"userid": userid, "enable": 1 if enable else 0} + if password: + kwargs["password"] = password + if email: + kwargs["email"] = email + if firstname: + kwargs["firstname"] = firstname + if lastname: + kwargs["lastname"] = lastname + if groups: + kwargs["groups"] = groups + if comment: + kwargs["comment"] = comment + logger.info("Creating user '%s'", userid) + await client.api_call(client.api.access.users.post, **kwargs) + return { + "status": "success", + "userid": userid, + "message": f"User '{userid}' created successfully.", + } + except Exception as e: + logger.error("Failed to create user '%s': %s", userid, e) + return format_error_response(e) + + +@mcp.tool() +async def delete_user(userid: str, confirm: bool = False) -> dict: + """Delete a user. Set confirm=True to execute. + + Args: + userid: User ID to delete (e.g. 'john@pve'). + confirm: Must be True to execute. + """ + try: + client = get_client() + if not confirm: + return { + "status": "confirmation_required", + "warning": f"This will delete user '{userid}'.", + "action": "Call delete_user again with confirm=True to proceed.", + } + if client.is_dry_run: + return client.dry_run_response("delete_user", userid=userid) + logger.warning("Deleting user '%s'", userid) + await client.api_call(client.api.access.users(userid).delete) + return { + "status": "success", + "userid": userid, + "message": f"User '{userid}' deleted.", + } + except Exception as e: + logger.error("Failed to delete user '%s': %s", userid, e) + return format_error_response(e) + + +@mcp.tool() +async def list_roles() -> dict: + """List all available roles in the Proxmox cluster. + + Returns each role's ID and associated privileges. 
+ """ + try: + client = get_client() + logger.info("Listing all roles") + data = await client.api_call(client.api.access.roles.get) + return {"status": "success", "count": len(data), "roles": data} + except Exception as e: + logger.error("Failed to list roles: %s", e) + return format_error_response(e) + + +@mcp.tool() +async def set_user_permission( + path: str, + roles: str, + users: str | None = None, + groups: str | None = None, + propagate: bool = True, +) -> dict: + """Set access control permissions (ACL). + + Args: + path: ACL path (e.g. '/', '/vms/100', '/storage/local', '/pool/dev'). + roles: Comma-separated role names (e.g. 'PVEVMUser', 'PVEAdmin'). + users: Comma-separated user IDs to grant (e.g. 'john@pve'). + groups: Comma-separated group names to grant. + propagate: Propagate to child objects (default True). + """ + try: + client = get_client() + if not users and not groups: + return format_error_response( + Exception("Must specify either 'users' or 'groups' (or both).") + ) + if client.is_dry_run: + return client.dry_run_response( + "set_user_permission", path=path, roles=roles + ) + kwargs: dict = {"path": path, "roles": roles, "propagate": 1 if propagate else 0} + if users: + kwargs["users"] = users + if groups: + kwargs["groups"] = groups + logger.info("Setting ACL on '%s': roles=%s", path, roles) + await client.api_call(client.api.access.acl.put, **kwargs) + return { + "status": "success", + "path": path, + "roles": roles, + "users": users, + "groups": groups, + } + except Exception as e: + logger.error("Failed to set ACL on '%s': %s", path, e) + return format_error_response(e) + + +# --------------------------------------------------------------------------- +# HA (High Availability) resource management +# --------------------------------------------------------------------------- + +VALID_HA_STATES = frozenset({"started", "stopped", "disabled", "ignored"}) + + +@mcp.tool() +async def list_ha_resources() -> dict: + """List all HA-managed 
resources in the cluster. + + Returns each resource's SID, state, group, and settings. + """ + try: + client = get_client() + logger.info("Listing HA resources") + data = await client.api_call(client.api.cluster.ha.resources.get) + return {"status": "success", "count": len(data), "resources": data} + except Exception as e: + logger.error("Failed to list HA resources: %s", e) + return format_error_response(e) + + +@mcp.tool() +async def create_ha_resource( + sid: str, + state: str = "started", + group: str | None = None, + max_restart: int | None = None, + max_relocate: int | None = None, + comment: str | None = None, +) -> dict: + """Add a VM/CT to HA management. + + Args: + sid: Service ID in 'type:vmid' format (e.g. 'vm:100', 'ct:200'). + state: Desired state - 'started', 'stopped', 'disabled', 'ignored'. + group: HA group to assign to. + max_restart: Max restart attempts on failure (default: cluster setting). + max_relocate: Max relocations on failure (default: cluster setting). + comment: Resource comment/description. + """ + try: + client = get_client() + if state not in VALID_HA_STATES: + return format_error_response( + Exception( + f"Invalid state '{state}'. 
" + f"Must be one of: {', '.join(sorted(VALID_HA_STATES))}" + ) + ) + if client.is_dry_run: + return client.dry_run_response("create_ha_resource", sid=sid, state=state) + kwargs: dict = {"sid": sid, "state": state} + if group: + kwargs["group"] = group + if max_restart is not None: + kwargs["max_restart"] = max_restart + if max_relocate is not None: + kwargs["max_relocate"] = max_relocate + if comment: + kwargs["comment"] = comment + logger.info("Adding HA resource '%s' with state '%s'", sid, state) + await client.api_call(client.api.cluster.ha.resources.post, **kwargs) + return { + "status": "success", + "sid": sid, + "state": state, + "message": f"HA resource '{sid}' created.", + } + except Exception as e: + logger.error("Failed to create HA resource '%s': %s", sid, e) + return format_error_response(e) + + +@mcp.tool() +async def modify_ha_resource( + sid: str, + state: str | None = None, + group: str | None = None, + max_restart: int | None = None, + max_relocate: int | None = None, + comment: str | None = None, +) -> dict: + """Modify an existing HA resource. + + Args: + sid: Service ID (e.g. 'vm:100'). + state: New desired state - 'started', 'stopped', 'disabled', 'ignored'. + group: HA group to assign to. + max_restart: Max restart attempts. + max_relocate: Max relocations. + comment: Resource comment. + """ + try: + client = get_client() + if state is not None and state not in VALID_HA_STATES: + return format_error_response( + Exception( + f"Invalid state '{state}'. 
" + f"Must be one of: {', '.join(sorted(VALID_HA_STATES))}" + ) + ) + if client.is_dry_run: + return client.dry_run_response("modify_ha_resource", sid=sid) + kwargs: dict = {} + if state is not None: + kwargs["state"] = state + if group is not None: + kwargs["group"] = group + if max_restart is not None: + kwargs["max_restart"] = max_restart + if max_relocate is not None: + kwargs["max_relocate"] = max_relocate + if comment is not None: + kwargs["comment"] = comment + if not kwargs: + return format_error_response( + Exception("No changes specified for HA resource.") + ) + logger.info("Modifying HA resource '%s': %s", sid, list(kwargs.keys())) + await client.api_call(client.api.cluster.ha.resources(sid).put, **kwargs) + return { + "status": "success", + "sid": sid, + "changes": list(kwargs.keys()), + } + except Exception as e: + logger.error("Failed to modify HA resource '%s': %s", sid, e) + return format_error_response(e) + + +@mcp.tool() +async def delete_ha_resource(sid: str, confirm: bool = False) -> dict: + """Remove a VM/CT from HA management. Set confirm=True to execute. + + This does NOT delete the VM/CT, only removes it from HA. + + Args: + sid: Service ID (e.g. 'vm:100'). + confirm: Must be True to execute. + """ + try: + client = get_client() + if not confirm: + return { + "status": "confirmation_required", + "warning": ( + f"This will remove '{sid}' from HA management. " + f"The VM/CT itself will NOT be deleted." 
+ ), + "action": "Call delete_ha_resource with confirm=True to proceed.", + } + if client.is_dry_run: + return client.dry_run_response("delete_ha_resource", sid=sid) + logger.warning("Removing HA resource '%s'", sid) + await client.api_call(client.api.cluster.ha.resources(sid).delete) + return { + "status": "success", + "sid": sid, + "message": f"HA resource '{sid}' removed.", + } + except Exception as e: + logger.error("Failed to delete HA resource '%s': %s", sid, e) return format_error_response(e) diff --git a/src/proxmox_mcp/tools/container.py b/src/proxmox_mcp/tools/container.py index 47c66ee..ebac1fc 100644 --- a/src/proxmox_mcp/tools/container.py +++ b/src/proxmox_mcp/tools/container.py @@ -2,34 +2,44 @@ import json import logging + from proxmox_mcp.utils.errors import format_error_response -from proxmox_mcp.utils.validators import validate_vmid, validate_node_name from proxmox_mcp.utils.formatters import format_container_summary, format_task_result +from proxmox_mcp.utils.validators import validate_node_name, validate_vmid logger = logging.getLogger("proxmox-mcp") +# Allowlist of container config keys safe to modify via extra_config. 
+CT_SAFE_CONFIG_KEYS = frozenset({ + # Resources + "memory", "swap", "cores", "cpulimit", "cpuunits", + # Identity + "hostname", "description", "tags", "onboot", "startup", "protection", + # Storage + "rootfs", "mp0", "mp1", "mp2", "mp3", + # Network + "net0", "net1", "net2", "net3", + "nameserver", "searchdomain", + # OS + "ostype", "arch", "unprivileged", "features", +}) + def get_client(): from proxmox_mcp.server import proxmox_client + return proxmox_client def get_mcp(): from proxmox_mcp.server import mcp + return mcp mcp = get_mcp() -async def _resolve_node(client, vmid: int, node: str | None) -> str: - if node: - validate_node_name(node) - client.validate_node(node) - return node - return await client.resolve_node_for_vmid(vmid) - - @mcp.tool() async def list_containers(node: str | None = None, status_filter: str | None = None) -> dict: """List all LXC containers across the cluster or on a specific node. @@ -66,11 +76,13 @@ async def get_container_status(vmid: int, node: str | None = None) -> dict: try: client = get_client() validate_vmid(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) data = await client.api_call(client.api.nodes(node).lxc(vmid).status.current.get) return {"status": "success", "vmid": vmid, "node": node, "data": data} except Exception as e: - return format_error_response(e, suggestion="Use list_containers to see available containers.") + return format_error_response( + e, suggestion="Use list_containers to see available containers." 
+ ) @mcp.tool() @@ -84,7 +96,7 @@ async def get_container_config(vmid: int, node: str | None = None) -> dict: try: client = get_client() validate_vmid(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) data = await client.api_call(client.api.nodes(node).lxc(vmid).config.get) return {"status": "success", "vmid": vmid, "node": node, "config": data} except Exception as e: @@ -102,7 +114,7 @@ async def start_container(vmid: int, node: str | None = None) -> dict: try: client = get_client() validate_vmid(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("start_container", vmid=vmid, node=node) logger.info("Starting container %d on %s", vmid, node) @@ -124,7 +136,7 @@ async def stop_container(vmid: int, node: str | None = None) -> dict: client = get_client() validate_vmid(vmid) client.check_protected(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("stop_container", vmid=vmid, node=node) logger.warning("Stopping container %d on %s", vmid, node) @@ -147,7 +159,7 @@ async def shutdown_container(vmid: int, node: str | None = None, timeout: int = client = get_client() validate_vmid(vmid) client.check_protected(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("shutdown_container", vmid=vmid, node=node) logger.info("Graceful shutdown of container %d on %s", vmid, node) @@ -171,7 +183,7 @@ async def reboot_container(vmid: int, node: str | None = None) -> dict: client = get_client() validate_vmid(vmid) client.check_protected(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("reboot_container", vmid=vmid, node=node) 
logger.info("Rebooting container %d on %s", vmid, node) @@ -183,8 +195,12 @@ async def reboot_container(vmid: int, node: str | None = None) -> dict: @mcp.tool() async def clone_container( - vmid: int, newid: int, name: str, node: str | None = None, - full: bool = True, target_node: str | None = None, + vmid: int, + newid: int, + name: str, + node: str | None = None, + full: bool = True, + target_node: str | None = None, ) -> dict: """Clone an LXC container. @@ -200,7 +216,7 @@ async def clone_container( client = get_client() validate_vmid(vmid) validate_vmid(newid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("clone_container", vmid=vmid, newid=newid, name=name) kwargs = {"newid": newid, "hostname": name, "full": 1 if full else 0} @@ -215,8 +231,11 @@ async def clone_container( @mcp.tool() async def migrate_container( - vmid: int, target_node: str, node: str | None = None, - online: bool = False, restart: bool = True, + vmid: int, + target_node: str, + node: str | None = None, + online: bool = False, + restart: bool = True, ) -> dict: """Migrate an LXC container to another node. 
@@ -230,13 +249,15 @@ async def migrate_container( try: client = get_client() validate_vmid(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("migrate_container", vmid=vmid, target=target_node) logger.info("Migrating container %d from %s to %s", vmid, node, target_node) upid = await client.api_call( client.api.nodes(node).lxc(vmid).migrate.post, - target=target_node, online=1 if online else 0, restart=1 if restart else 0, + target=target_node, + online=1 if online else 0, + restart=1 if restart else 0, ) return format_task_result({"data": upid}) except Exception as e: @@ -245,12 +266,20 @@ async def migrate_container( @mcp.tool() async def create_container( - node: str, ostemplate: str, hostname: str, - vmid: int | None = None, password: str | None = None, - ssh_public_keys: str | None = None, memory: int = 512, - swap: int = 512, cores: int = 1, rootfs_size: str = "8", - storage: str = "local-lvm", net_bridge: str = "vmbr0", - ip_config: str = "dhcp", unprivileged: bool = True, + node: str, + ostemplate: str, + hostname: str, + vmid: int | None = None, + password: str | None = None, + ssh_public_keys: str | None = None, + memory: int = 512, + swap: int = 512, + cores: int = 1, + rootfs_size: str = "8", + storage: str = "local-lvm", + net_bridge: str = "vmbr0", + ip_config: str = "dhcp", + unprivileged: bool = True, start_after_create: bool = False, ) -> dict: """Create a new LXC container. @@ -312,8 +341,11 @@ async def create_container( @mcp.tool() async def delete_container( - vmid: int, node: str | None = None, purge: bool = True, - force: bool = False, confirm: bool = False, + vmid: int, + node: str | None = None, + purge: bool = True, + force: bool = False, + confirm: bool = False, ) -> dict: """Permanently delete an LXC container. Set confirm=True to execute. 
@@ -328,14 +360,14 @@ async def delete_container( client = get_client() validate_vmid(vmid) client.check_protected(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if not confirm: ct_data = await client.api_call(client.api.nodes(node).lxc(vmid).status.current.get) return { "status": "confirmation_required", "warning": ( - f"This will PERMANENTLY DELETE container {vmid} ({ct_data.get('name', 'unnamed')}). " - f"This cannot be undone." + f"This will PERMANENTLY DELETE container {vmid} " + f"({ct_data.get('name', 'unnamed')}). This cannot be undone." ), "action": "Call delete_container again with confirm=True to proceed.", "container_info": {"vmid": vmid, "name": ct_data.get("name"), "node": node}, @@ -356,11 +388,16 @@ async def delete_container( @mcp.tool() async def modify_container_config( - vmid: int, node: str | None = None, - memory: int | None = None, swap: int | None = None, - cores: int | None = None, hostname: str | None = None, - description: str | None = None, onboot: bool | None = None, - tags: str | None = None, extra_config: str | None = None, + vmid: int, + node: str | None = None, + memory: int | None = None, + swap: int | None = None, + cores: int | None = None, + hostname: str | None = None, + description: str | None = None, + onboot: bool | None = None, + tags: str | None = None, + extra_config: str | None = None, ) -> dict: """Modify LXC container configuration. 
@@ -380,7 +417,7 @@ async def modify_container_config( client = get_client() validate_vmid(vmid) client.check_protected(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("modify_container_config", vmid=vmid, node=node) kwargs = {} @@ -400,13 +437,21 @@ async def modify_container_config( kwargs["tags"] = tags if extra_config: extra = json.loads(extra_config) - # Prevent overriding safety-relevant parameters - blocked_keys = {"vmid", "node", "digest"} - extra = {k: v for k, v in extra.items() if k not in blocked_keys} + unsafe_keys = [k for k in extra if k not in CT_SAFE_CONFIG_KEYS] + if unsafe_keys: + return format_error_response( + Exception( + f"Keys not in allowlist: {unsafe_keys}. " + f"Modifying these keys is restricted for safety." + ) + ) kwargs.update(extra) if not kwargs: - return {"status": "error", "error_type": "InvalidParameterError", - "message": "No configuration changes specified."} + return { + "status": "error", + "error_type": "InvalidParameterError", + "message": "No configuration changes specified.", + } logger.info("Modifying container %d config: %s", vmid, list(kwargs.keys())) await client.api_call(client.api.nodes(node).lxc(vmid).config.put, **kwargs) return {"status": "success", "vmid": vmid, "node": node, "changes": list(kwargs.keys())} diff --git a/src/proxmox_mcp/tools/disk.py b/src/proxmox_mcp/tools/disk.py new file mode 100644 index 0000000..df90691 --- /dev/null +++ b/src/proxmox_mcp/tools/disk.py @@ -0,0 +1,742 @@ +"""Physical disk management tools for Proxmox VE nodes. + +Provides tools for listing, partitioning, formatting, mounting, and +unmounting physical block devices. Uses the Proxmox API where available +and SSH for operations the API does not cover. 
+""" + +import json +import logging + +from proxmox_mcp.utils.errors import ( + DeviceInUseError, + DeviceNotFoundError, + SafetyGateError, + format_error_response, +) +from proxmox_mcp.utils.formatters import format_bytes +from proxmox_mcp.utils.sanitizers import ( + check_shell_injection, + validate_device_path, + validate_filesystem, + validate_label, + validate_mkfs_options, + validate_mount_options, + validate_mount_path, + validate_partition_table, + validate_uuid, +) +from proxmox_mcp.utils.validators import validate_node_name + +logger = logging.getLogger("proxmox-mcp") + + +def get_client(): + from proxmox_mcp.server import proxmox_client + + return proxmox_client + + +def get_mcp(): + from proxmox_mcp.server import mcp + + return mcp + + +def get_ssh(): + from proxmox_mcp.server import ssh_executor + + return ssh_executor + + +mcp = get_mcp() + + +# --------------------------------------------------------------------------- +# Shared helpers +# --------------------------------------------------------------------------- + + +async def _check_device_exists(ssh, node: str, device: str) -> None: + """Verify a device exists and is a block device.""" + result = await ssh.execute(node, f"test -b {device} && echo exists") + if result.exit_code != 0 or "exists" not in result.stdout: + raise DeviceNotFoundError(f"Device {device} not found or is not a block device on {node}.") + + +async def _check_not_boot_disk(ssh, node: str, device: str) -> None: + """Reject if any partition on the device is mounted as /, /boot, or /boot/efi.""" + result = await ssh.execute(node, f"lsblk -no MOUNTPOINT {device} {device}[0-9]* 2>/dev/null") + if result.success: + mounts = [line.strip() for line in result.stdout.splitlines() if line.strip()] + critical = {"/", "/boot", "/boot/efi"} + found = critical.intersection(mounts) + if found: + raise SafetyGateError( + f"Device {device} contains boot partitions mounted at: {', '.join(found)}. " + f"Refusing to operate on a boot disk." 
+            )
+
+
+async def _check_not_in_use(ssh, node: str, device: str) -> dict | None:
+    """Check that no partition on the device is mounted, in LVM, ZFS, or MD RAID.
+
+    Returns None if the device is free.
+    Raises DeviceInUseError with usage details if the device is in use.
+    """
+    # Check mounts
+    result = await ssh.execute(
+        node,
+        f"findmnt -rno SOURCE,TARGET -S {device} 2>/dev/null; "
+        f"findmnt -rno SOURCE,TARGET -S {device}[0-9]* 2>/dev/null",
+    )
+    if result.success and result.stdout.strip():
+        raise DeviceInUseError(f"Device {device} has mounted partitions:\n{result.stdout.strip()}")
+
+    # Check LVM
+    result = await ssh.execute(
+        node, f"pvs --noheadings -o pv_name,vg_name 2>/dev/null | grep -E '{device}'"
+    )
+    if result.success and result.stdout.strip():
+        raise DeviceInUseError(
+            f"Device {device} is an LVM physical volume:\n{result.stdout.strip()}"
+        )
+
+    # Check ZFS
+    result = await ssh.execute(
+        node, "zpool status 2>/dev/null | grep -E '" + device.split("/")[-1] + "'"
+    )
+    if result.success and result.stdout.strip():
+        raise DeviceInUseError(f"Device {device} is part of a ZFS pool:\n{result.stdout.strip()}")
+
+    # Check MD RAID
+    result = await ssh.execute(node, f"grep '{device.split('/')[-1]}' /proc/mdstat 2>/dev/null")
+    if result.success and result.stdout.strip():
+        raise DeviceInUseError(
+            f"Device {device} is part of an MD RAID array:\n{result.stdout.strip()}"
+        )
+
+    return None
+
+
+# ---------------------------------------------------------------------------
+# Tool 1: list_physical_disks
+# ---------------------------------------------------------------------------
+
+
+@mcp.tool()
+async def list_physical_disks(
+    node: str,
+    include_partitions: bool = True,
+    filter_unused: bool = False,
+) -> dict:
+    """Enumerate all physical block devices on a Proxmox node.
+
+    Returns disk model, serial, size, SMART health, partition details,
+    and usage status (mounted, LVM, ZFS, etc.).
+
+    Args:
+        node: Target Proxmox node name (e.g., 'hobbiton').
+ include_partitions: Include partition table details for each disk. + filter_unused: Only show disks not currently in use. + """ + try: + client = get_client() + ssh = get_ssh() + validate_node_name(node) + client.validate_node(node) + logger.info("Listing physical disks on node '%s'", node) + + # Primary: Proxmox API for disk list + api_disks = await client.api_call(client.api.nodes(node).disks.list.get) + + # Enrich with SSH lsblk for partition-level detail + lsblk_data = {} + if include_partitions: + result = await ssh.execute( + node, + "lsblk -Jb -o NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,UUID,MODEL,SERIAL," + "ROTA,TRAN,PTTYPE,PKNAME", + ) + if result.success: + try: + parsed = json.loads(result.stdout) + for dev in parsed.get("blockdevices", []): + lsblk_data[f"/dev/{dev['name']}"] = dev + except (json.JSONDecodeError, KeyError): + logger.warning("Failed to parse lsblk JSON output") + + # Check LVM PVs for usage detection + lvm_pvs = set() + pv_result = await ssh.execute(node, "pvs --noheadings -o pv_name 2>/dev/null") + if pv_result.success: + lvm_pvs = {line.strip() for line in pv_result.stdout.splitlines() if line.strip()} + + disks = [] + for api_disk in api_disks: + dev_path = api_disk.get("devpath", "") + size_bytes = api_disk.get("size", 0) + + # Determine usage type + used = api_disk.get("used", "") + usage_type = used if used else "unused" + in_use = usage_type != "unused" + + if filter_unused and in_use: + continue + + disk_info = { + "device": dev_path, + "model": api_disk.get("model", "Unknown"), + "serial": api_disk.get("serial", "Unknown"), + "size_bytes": size_bytes, + "size_human": format_bytes(size_bytes), + "transport": api_disk.get("type", "unknown"), + "rotation": bool(api_disk.get("rpm", 0)), + "smart_status": api_disk.get("health", "UNKNOWN"), + "gpt_label": api_disk.get("gpt", "unknown"), + "in_use": in_use, + "usage_type": usage_type, + } + + # Add partition info from lsblk + if include_partitions and dev_path in lsblk_data: + lsblk_dev = 
lsblk_data[dev_path] + partitions = [] + for child in lsblk_dev.get("children", []): + if child.get("type") not in ("part", "partition"): + continue + child_dev = f"/dev/{child['name']}" + part_in_use = bool(child.get("mountpoint")) or child_dev in lvm_pvs + partitions.append( + { + "device": child_dev, + "size_bytes": child.get("size", 0), + "size_human": format_bytes(child.get("size", 0)), + "filesystem": child.get("fstype") or "none", + "mountpoint": child.get("mountpoint"), + "uuid": child.get("uuid"), + "in_use": part_in_use, + } + ) + disk_info["partitions"] = partitions + + disks.append(disk_info) + + return { + "status": "success", + "node": node, + "count": len(disks), + "disks": disks, + } + except Exception as e: + logger.error("Failed to list physical disks on '%s': %s", node, e) + return format_error_response(e) + + +# --------------------------------------------------------------------------- +# Tool 2: partition_disk +# --------------------------------------------------------------------------- + + +@mcp.tool() +async def partition_disk( + node: str, + device: str, + partition_table: str = "gpt", + filesystem: str = "ext4", + label: str | None = None, + confirm_destructive: bool = False, +) -> dict: + """Create a partition table and single partition on a physical disk. + + Creates a GPT (or msdos) partition table with a single partition spanning + the entire disk, optionally formatted with a filesystem. + + WARNING: This DESTROYS all data on the target device. + + Args: + node: Target Proxmox node name. + device: Whole disk device path (e.g., '/dev/sdb'). Must NOT be a partition. + partition_table: Partition table type: 'gpt' or 'msdos'. + filesystem: Filesystem to create: 'ext4', 'xfs', or 'vfat'. Use 'none' to skip. + label: Optional filesystem label (max 16 chars, alphanumeric). + confirm_destructive: MUST be True. Acknowledges data destruction. 
+ """ + try: + ssh = get_ssh() + client = get_client() + validate_node_name(node) + client.validate_node(node) + + # Validate all inputs + validate_device_path(device, allow_partition=False) + validate_partition_table(partition_table) + if filesystem != "none": + validate_filesystem(filesystem) + if label: + validate_label(label) + + # Safety gate 1: confirm_destructive + if not confirm_destructive: + return { + "status": "confirmation_required", + "warning": ( + f"This will DESTROY ALL DATA on {device}. " + f"A new {partition_table} partition table will be created " + f"with a single {filesystem} partition." + ), + "action": "Call partition_disk again with confirm_destructive=True to proceed.", + "device": device, + } + + # Safety gate 2: device exists + await _check_device_exists(ssh, node, device) + + # Safety gate 3: not a boot disk + await _check_not_boot_disk(ssh, node, device) + + # Safety gate 4: not in active use + await _check_not_in_use(ssh, node, device) + + # Dry run check + if client.is_dry_run: + return client.dry_run_response( + "partition_disk", + device=device, + node=node, + partition_table=partition_table, + filesystem=filesystem, + ) + + logger.warning( + "Partitioning disk %s on %s (table=%s, fs=%s)", + device, + node, + partition_table, + filesystem, + ) + + # Step 1: Wipe existing signatures + result = await ssh.execute(node, f"wipefs -a {device}", timeout=30) + if not result.success: + return format_error_response( + Exception(f"wipefs failed: {result.stderr}"), + suggestion="Check that the device is not in use.", + ) + + # Step 2: Create GPT partition table and single partition spanning entire disk + # Use sgdisk (available on Proxmox) with parted as fallback + # sgdisk -Z: zap all partition data, -n 1:0:0: new partition 1 using all space, + # -t 1:8300: set type to Linux filesystem + if partition_table == "gpt": + result = await ssh.execute( + node, f"sgdisk -Z {device} && sgdisk -n 1:0:0 -t 1:8300 {device}", timeout=30 + ) + else: + # 
msdos/MBR: fall back to sfdisk
+            result = await ssh.execute(
+                node, f"echo ',,L;' | sfdisk --label dos {device}", timeout=30
+            )
+        if not result.success:
+            return format_error_response(
+                Exception(f"Partitioning failed: {result.stderr}")
+            )
+
+        # Step 3: Re-read partition table
+        await ssh.execute(node, f"blockdev --rereadpt {device} && sleep 1", timeout=15)
+
+        partition = f"{device}1"
+
+        # Step 4: Create filesystem (if requested)
+        fs_uuid = None
+        if filesystem != "none":
+            label_flag = ""
+            if label:
+                if filesystem == "vfat":
+                    label_flag = f" -n {label}"
+                else:
+                    label_flag = f" -L {label}"
+
+            mkfs_cmd = f"mkfs.{filesystem}{label_flag} {partition}"
+            result = await ssh.execute(node, mkfs_cmd, timeout=120)
+            if not result.success:
+                # Partial success: partition created but filesystem failed
+                return {
+                    "status": "partial_success",
+                    "warning": "Partition created but filesystem creation failed.",
+                    "device": device,
+                    "partition": partition,
+                    "partition_table": partition_table,
+                    "filesystem_error": result.stderr,
+                }
+
+        # Step 5: Get UUID
+        blkid_result = await ssh.execute(node, f"blkid -o export {partition}")
+        if blkid_result.success:
+            for line in blkid_result.stdout.splitlines():
+                if line.startswith("UUID="):
+                    fs_uuid = line.split("=", 1)[1]
+                    break
+
+        return {
+            "status": "success",
+            "device": device,
+            "partition_table": partition_table,
+            "partitions_created": [
+                {
+                    "device": partition,
+                    "filesystem": filesystem if filesystem != "none" else None,
+                    "uuid": fs_uuid,
+                    "label": label,
+                    "size_human": "entire disk",
+                }
+            ],
+        }
+    except Exception as e:
+        logger.error("Failed to partition disk %s on '%s': %s", device, node, e)
+        return format_error_response(e)
+
+
+# ---------------------------------------------------------------------------
+# Tool 3: format_disk
+# ---------------------------------------------------------------------------
+
+
+@mcp.tool()
+async def format_disk(
+    node: str,
+    device: str,
+    filesystem: str,
+    label: str 
| None = None, + options: str | None = None, + confirm_destructive: bool = False, +) -> dict: + """Create a filesystem on an existing partition or disk. + + Useful when a partition already exists but needs a new or different filesystem. + + WARNING: This DESTROYS all data on the target device/partition. + + Args: + node: Target Proxmox node name. + device: Partition or disk path (e.g., '/dev/sdb1'). + filesystem: Filesystem type: 'ext4', 'xfs', or 'vfat'. + label: Optional filesystem label (max 16 chars). + options: Additional mkfs options (e.g., '-m 1'). Validated against allowlist. + confirm_destructive: MUST be True. Acknowledges data destruction. + """ + try: + ssh = get_ssh() + client = get_client() + validate_node_name(node) + client.validate_node(node) + + # Validate inputs + validate_device_path(device, allow_partition=True) + validate_filesystem(filesystem) + if label: + validate_label(label) + if options: + validate_mkfs_options(options) + + # Safety gate: confirm + if not confirm_destructive: + return { + "status": "confirmation_required", + "warning": ( + f"This will DESTROY ALL DATA on {device}. " + f"A new {filesystem} filesystem will be created." + ), + "action": "Call format_disk again with confirm_destructive=True to proceed.", + "device": device, + } + + # Safety gate: device exists + await _check_device_exists(ssh, node, device) + + # Safety gate: not mounted + mount_check = await ssh.execute(node, f"findmnt -rno TARGET {device} 2>/dev/null") + if mount_check.success and mount_check.stdout.strip(): + raise DeviceInUseError( + f"Device {device} is mounted at {mount_check.stdout.strip()}. Unmount first." 
+ ) + + if client.is_dry_run: + return client.dry_run_response( + "format_disk", + device=device, + node=node, + filesystem=filesystem, + ) + + logger.warning("Formatting %s on %s as %s", device, node, filesystem) + + # Wipe existing signatures + await ssh.execute(node, f"wipefs -a {device}", timeout=30) + + # Build mkfs command + label_flag = "" + if label: + label_flag = f" -n {label}" if filesystem == "vfat" else f" -L {label}" + + extra = f" {options}" if options else "" + force = " -F" if filesystem == "ext4" else (" -f" if filesystem == "xfs" else "") + mkfs_cmd = f"mkfs.{filesystem}{force}{label_flag}{extra} {device}" + + result = await ssh.execute(node, mkfs_cmd, timeout=120) + if not result.success: + return format_error_response(Exception(f"mkfs.{filesystem} failed: {result.stderr}")) + + # Get UUID + fs_uuid = None + blkid_result = await ssh.execute(node, f"blkid -o export {device}") + if blkid_result.success: + for line in blkid_result.stdout.splitlines(): + if line.startswith("UUID="): + fs_uuid = line.split("=", 1)[1] + break + + return { + "status": "success", + "device": device, + "filesystem": filesystem, + "uuid": fs_uuid, + "label": label, + } + except Exception as e: + logger.error("Failed to format %s on '%s': %s", device, node, e) + return format_error_response(e) + + +# --------------------------------------------------------------------------- +# Tool 4: create_mount_point +# --------------------------------------------------------------------------- + + +@mcp.tool() +async def create_mount_point( + node: str, + device: str, + mount_path: str, + filesystem: str | None = None, + mount_options: str = "defaults", + persist_fstab: bool = True, +) -> dict: + """Mount a filesystem to a path and optionally persist in /etc/fstab. + + Args: + node: Target Proxmox node name. + device: Device or partition path (e.g., '/dev/sdb1'). + mount_path: Absolute path for the mount point. Must be under /mnt/, /srv/, or /media/. + filesystem: Filesystem type. 
Auto-detected if omitted. + mount_options: Mount options (e.g., 'defaults,noatime'). Validated against allowlist. + persist_fstab: Add entry to /etc/fstab for persistence across reboots. + """ + try: + ssh = get_ssh() + client = get_client() + validate_node_name(node) + client.validate_node(node) + + # Validate inputs + validate_device_path(device, allow_partition=True) + validate_mount_path(mount_path) + validate_mount_options(mount_options) + if filesystem: + validate_filesystem(filesystem) + + # Safety gate: device exists and has a filesystem + await _check_device_exists(ssh, node, device) + + blkid_result = await ssh.execute(node, f"blkid -o export {device}") + detected_fs = None + device_uuid = None + if blkid_result.success: + for line in blkid_result.stdout.splitlines(): + if line.startswith("TYPE="): + detected_fs = line.split("=", 1)[1] + elif line.startswith("UUID="): + device_uuid = line.split("=", 1)[1] + + fs_type = filesystem or detected_fs + if not fs_type: + return format_error_response( + Exception(f"No filesystem detected on {device}. 
Format it first."), + suggestion="Use format_disk to create a filesystem before mounting.", + ) + + # Safety gate: path not already a mount point + mount_check = await ssh.execute(node, f"findmnt -rno TARGET {mount_path} 2>/dev/null") + if mount_check.success and mount_check.stdout.strip(): + raise DeviceInUseError(f"Path {mount_path} is already a mount point.") + + if client.is_dry_run: + return client.dry_run_response( + "create_mount_point", + device=device, + mount_path=mount_path, + node=node, + filesystem=fs_type, + ) + + logger.warning( + "Mounting %s at %s on %s (fs=%s, options=%s)", + device, + mount_path, + node, + fs_type, + mount_options, + ) + + # Create mount directory + result = await ssh.execute(node, f"mkdir -p {mount_path}") + if not result.success: + return format_error_response( + Exception(f"Failed to create mount directory: {result.stderr}") + ) + + # Mount + result = await ssh.execute( + node, f"mount -t {fs_type} -o {mount_options} {device} {mount_path}" + ) + if not result.success: + return format_error_response(Exception(f"Mount failed: {result.stderr}")) + + # Verify mount + verify = await ssh.execute(node, f"findmnt -rno SOURCE {mount_path}") + if not verify.success or not verify.stdout.strip(): + return format_error_response( + Exception("Mount appeared to succeed but verification failed.") + ) + + # Persist to fstab + fstab_added = False + fstab_line = None + if persist_fstab and device_uuid: + # Validate UUID format before shell interpolation + validate_uuid(device_uuid) + fstab_line = f"UUID={device_uuid} {mount_path} {fs_type} {mount_options} 0 2" + + # Backup fstab + await ssh.execute(node, "cp /etc/fstab /etc/fstab.bak.$(date +%s)") + + # Add entry using printf for safety + result = await ssh.execute( + node, f"printf '%s\\n' '{fstab_line}' >> /etc/fstab" + ) + if result.success: + # Validate fstab + validate = await ssh.execute(node, "mount -a --fake") + if validate.success: + fstab_added = True + else: + # Rollback fstab + 
logger.error("fstab validation failed, rolling back: %s", validate.stderr) + await ssh.execute( + node, + "cp $(ls -t /etc/fstab.bak.* | head -1) /etc/fstab", + ) + logger.warning("fstab rolled back after validation failure") + + return { + "status": "success", + "device": device, + "uuid": device_uuid, + "mount_path": mount_path, + "filesystem": fs_type, + "mount_options": mount_options, + "fstab_entry_added": fstab_added, + "fstab_line": fstab_line if fstab_added else None, + } + except Exception as e: + logger.error("Failed to mount %s at %s on '%s': %s", device, mount_path, node, e) + return format_error_response(e) + + +# --------------------------------------------------------------------------- +# Tool 5: unmount_path +# --------------------------------------------------------------------------- + + +@mcp.tool() +async def unmount_path( + node: str, + mount_path: str, + remove_fstab_entry: bool = False, + force: bool = False, +) -> dict: + """Unmount a filesystem and optionally remove its fstab entry. + + Args: + node: Target Proxmox node name. + mount_path: Path to unmount (e.g., '/mnt/data'). + remove_fstab_entry: Remove the matching fstab entry. + force: Use lazy unmount (umount -l) if device is busy. 
+ """ + try: + ssh = get_ssh() + client = get_client() + validate_node_name(node) + client.validate_node(node) + validate_mount_path(mount_path) + + # Safety gate: reject critical system mounts + check_shell_injection(mount_path, "mount_path") + + # Verify actually mounted + check = await ssh.execute(node, f"findmnt -rno SOURCE {mount_path} 2>/dev/null") + if not check.success or not check.stdout.strip(): + return format_error_response( + Exception(f"Path {mount_path} is not currently mounted."), + suggestion="Check the mount path and try again.", + ) + + if client.is_dry_run: + return client.dry_run_response( + "unmount_path", + mount_path=mount_path, + node=node, + ) + + logger.warning("Unmounting %s on %s (force=%s)", mount_path, node, force) + + # Unmount + umount_flag = " -l" if force else "" + result = await ssh.execute(node, f"umount{umount_flag} {mount_path}") + if not result.success: + return format_error_response( + Exception(f"Unmount failed: {result.stderr}"), + suggestion="Try with force=True for lazy unmount, or check open files with lsof.", + ) + + # Remove fstab entry + fstab_removed = False + if remove_fstab_entry: + # Backup fstab first + await ssh.execute(node, "cp /etc/fstab /etc/fstab.bak.$(date +%s)") + + # Remove line matching the mount path + escaped_path = mount_path.replace("/", "\\/") + result = await ssh.execute(node, f"sed -i '/ {escaped_path} /d' /etc/fstab") + if result.success: + # Validate fstab after modification + validate = await ssh.execute(node, "mount -a --fake") + if validate.success: + fstab_removed = True + else: + logger.error("fstab validation failed after removal, rolling back") + await ssh.execute( + node, + "cp $(ls -t /etc/fstab.bak.* | head -1) /etc/fstab", + ) + + return { + "status": "success", + "mount_path": mount_path, + "unmounted": True, + "fstab_entry_removed": fstab_removed, + } + except Exception as e: + logger.error("Failed to unmount %s on '%s': %s", mount_path, node, e) + return format_error_response(e) 
diff --git a/src/proxmox_mcp/tools/network.py b/src/proxmox_mcp/tools/network.py index 32fb859..dd3698d 100644 --- a/src/proxmox_mcp/tools/network.py +++ b/src/proxmox_mcp/tools/network.py @@ -1,33 +1,31 @@ """Network and firewall management tools.""" import logging -from proxmox_mcp.utils.errors import format_error_response -from proxmox_mcp.utils.validators import validate_vmid, validate_node_name + +from proxmox_mcp.utils.errors import InvalidParameterError, format_error_response +from proxmox_mcp.utils.validators import validate_node_name, validate_vmid + +VALID_FW_ACTIONS = frozenset({"ACCEPT", "DROP", "REJECT"}) +VALID_FW_TYPES = frozenset({"in", "out", "group"}) logger = logging.getLogger("proxmox-mcp") def get_client(): from proxmox_mcp.server import proxmox_client + return proxmox_client def get_mcp(): from proxmox_mcp.server import mcp + return mcp mcp = get_mcp() -async def _resolve_node(client, vmid: int, node: str | None) -> str: - if node: - validate_node_name(node) - client.validate_node(node) - return node - return await client.resolve_node_for_vmid(vmid) - - @mcp.tool() async def get_node_firewall_rules(node: str) -> dict: """List firewall rules configured on a node. 
@@ -56,7 +54,7 @@ async def get_vm_firewall_rules(vmid: int, node: str | None = None) -> dict: try: client = get_client() validate_vmid(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) # Try QEMU first, fall back to LXC try: data = await client.api_call(client.api.nodes(node).qemu(vmid).firewall.rules.get) @@ -78,7 +76,7 @@ async def get_vm_interfaces(vmid: int, node: str | None = None) -> dict: try: client = get_client() validate_vmid(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) # Try QEMU agent first, fall back to LXC interfaces try: data = await client.api_call( @@ -90,4 +88,235 @@ async def get_vm_interfaces(vmid: int, node: str | None = None) -> dict: interfaces = data return {"status": "success", "vmid": vmid, "node": node, "interfaces": interfaces} except Exception as e: - return format_error_response(e, suggestion="VM must be running. QEMU VMs require the guest agent.") + return format_error_response( + e, suggestion="VM must be running. QEMU VMs require the guest agent." + ) + + +@mcp.tool() +async def create_node_firewall_rule( + node: str, + action: str, + type: str, + enable: bool = True, + source: str | None = None, + dest: str | None = None, + proto: str | None = None, + dport: str | None = None, + sport: str | None = None, + comment: str | None = None, + pos: int | None = None, +) -> dict: + """Create a firewall rule on a node. + + Args: + node: The node name. + action: Rule action - 'ACCEPT', 'DROP', or 'REJECT'. + type: Rule type - 'in' (incoming), 'out' (outgoing), or 'group'. + enable: Enable the rule (default True). + source: Source address/CIDR (e.g. '10.0.0.0/24'). + dest: Destination address/CIDR. + proto: Protocol (e.g. 'tcp', 'udp', 'icmp'). + dport: Destination port or range (e.g. '80', '8000-9000'). + sport: Source port or range. + comment: Rule comment/description. + pos: Position in rule list (0-based). Appended if omitted. 
+ """ + try: + client = get_client() + validate_node_name(node) + client.validate_node(node) + if action not in VALID_FW_ACTIONS: + raise InvalidParameterError( + f"action must be one of {sorted(VALID_FW_ACTIONS)}, got '{action}'." + ) + if type not in VALID_FW_TYPES: + raise InvalidParameterError( + f"type must be one of {sorted(VALID_FW_TYPES)}, got '{type}'." + ) + if client.is_dry_run: + return client.dry_run_response( + "create_node_firewall_rule", node=node, action=action, type=type + ) + kwargs: dict = {"action": action, "type": type, "enable": 1 if enable else 0} + if source: + kwargs["source"] = source + if dest: + kwargs["dest"] = dest + if proto: + kwargs["proto"] = proto + if dport: + kwargs["dport"] = dport + if sport: + kwargs["sport"] = sport + if comment: + kwargs["comment"] = comment + if pos is not None: + kwargs["pos"] = pos + logger.info("Creating firewall rule on node '%s': %s %s", node, action, type) + await client.api_call(client.api.nodes(node).firewall.rules.post, **kwargs) + return {"status": "success", "node": node, "rule": kwargs} + except Exception as e: + return format_error_response(e) + + +@mcp.tool() +async def delete_node_firewall_rule( + node: str, + pos: int, + confirm: bool = False, +) -> dict: + """Delete a firewall rule from a node by position. Set confirm=True to execute. + + Args: + node: The node name. + pos: Rule position (0-based index from get_node_firewall_rules). + confirm: Must be True to execute. 
+ """ + try: + client = get_client() + validate_node_name(node) + client.validate_node(node) + if not confirm: + return { + "status": "confirmation_required", + "warning": f"This will delete firewall rule at position {pos} on node '{node}'.", + "action": "Call delete_node_firewall_rule with confirm=True to proceed.", + } + if client.is_dry_run: + return client.dry_run_response( + "delete_node_firewall_rule", node=node, pos=pos + ) + logger.warning("Deleting firewall rule %d on node '%s'", pos, node) + await client.api_call(client.api.nodes(node).firewall.rules(pos).delete) + return {"status": "success", "node": node, "deleted_pos": pos} + except Exception as e: + return format_error_response(e) + + +@mcp.tool() +async def create_vm_firewall_rule( + vmid: int, + action: str, + type: str, + node: str | None = None, + vm_type: str = "qemu", + enable: bool = True, + source: str | None = None, + dest: str | None = None, + proto: str | None = None, + dport: str | None = None, + sport: str | None = None, + comment: str | None = None, + pos: int | None = None, +) -> dict: + """Create a firewall rule on a VM or container. + + Args: + vmid: The VM/CT ID. + action: Rule action - 'ACCEPT', 'DROP', or 'REJECT'. + type: Rule type - 'in' (incoming), 'out' (outgoing), or 'group'. + node: The node name. Auto-detected if omitted. + vm_type: 'qemu' for VMs or 'lxc' for containers (default 'qemu'). + enable: Enable the rule (default True). + source: Source address/CIDR. + dest: Destination address/CIDR. + proto: Protocol (e.g. 'tcp', 'udp', 'icmp'). + dport: Destination port or range. + sport: Source port or range. + comment: Rule comment/description. + pos: Position in rule list (0-based). + """ + try: + client = get_client() + validate_vmid(vmid) + client.check_protected(vmid) + if action not in VALID_FW_ACTIONS: + raise InvalidParameterError( + f"action must be one of {sorted(VALID_FW_ACTIONS)}, got '{action}'." 
+ ) + if type not in VALID_FW_TYPES: + raise InvalidParameterError( + f"type must be one of {sorted(VALID_FW_TYPES)}, got '{type}'." + ) + node = await client.resolve_node(vmid, node) + if client.is_dry_run: + return client.dry_run_response( + "create_vm_firewall_rule", vmid=vmid, action=action, type=type + ) + kwargs: dict = {"action": action, "type": type, "enable": 1 if enable else 0} + if source: + kwargs["source"] = source + if dest: + kwargs["dest"] = dest + if proto: + kwargs["proto"] = proto + if dport: + kwargs["dport"] = dport + if sport: + kwargs["sport"] = sport + if comment: + kwargs["comment"] = comment + if pos is not None: + kwargs["pos"] = pos + api_path = ( + client.api.nodes(node).qemu(vmid) + if vm_type == "qemu" + else client.api.nodes(node).lxc(vmid) + ) + logger.info( + "Creating firewall rule on %s %d: %s %s", vm_type, vmid, action, type + ) + await client.api_call(api_path.firewall.rules.post, **kwargs) + return {"status": "success", "vmid": vmid, "node": node, "rule": kwargs} + except Exception as e: + return format_error_response(e) + + +@mcp.tool() +async def delete_vm_firewall_rule( + vmid: int, + pos: int, + node: str | None = None, + vm_type: str = "qemu", + confirm: bool = False, +) -> dict: + """Delete a firewall rule from a VM/CT by position. Set confirm=True to execute. + + Args: + vmid: The VM/CT ID. + pos: Rule position (0-based index). + node: The node name. Auto-detected if omitted. + vm_type: 'qemu' for VMs or 'lxc' for containers (default 'qemu'). + confirm: Must be True to execute. + """ + try: + client = get_client() + validate_vmid(vmid) + client.check_protected(vmid) + node = await client.resolve_node(vmid, node) + if not confirm: + return { + "status": "confirmation_required", + "warning": ( + f"This will delete firewall rule at position {pos} " + f"on {vm_type} {vmid}." 
+ ), + "action": "Call delete_vm_firewall_rule with confirm=True.", + } + if client.is_dry_run: + return client.dry_run_response( + "delete_vm_firewall_rule", vmid=vmid, pos=pos + ) + api_path = ( + client.api.nodes(node).qemu(vmid) + if vm_type == "qemu" + else client.api.nodes(node).lxc(vmid) + ) + logger.warning( + "Deleting firewall rule %d on %s %d", pos, vm_type, vmid + ) + await client.api_call(api_path.firewall.rules(pos).delete) + return {"status": "success", "vmid": vmid, "node": node, "deleted_pos": pos} + except Exception as e: + return format_error_response(e) diff --git a/src/proxmox_mcp/tools/node.py b/src/proxmox_mcp/tools/node.py index 632519d..55be300 100644 --- a/src/proxmox_mcp/tools/node.py +++ b/src/proxmox_mcp/tools/node.py @@ -1,8 +1,9 @@ -"""Node-level read-only tools for Proxmox VE.""" +"""Node-level tools for Proxmox VE.""" import logging + from proxmox_mcp.utils.errors import format_error_response -from proxmox_mcp.utils.formatters import format_bytes, format_uptime +from proxmox_mcp.utils.formatters import format_bytes, format_task_result, format_uptime from proxmox_mcp.utils.validators import validate_node_name logger = logging.getLogger("proxmox-mcp") @@ -10,11 +11,13 @@ def get_client(): from proxmox_mcp.server import proxmox_client + return proxmox_client def get_mcp(): from proxmox_mcp.server import mcp + return mcp @@ -35,18 +38,20 @@ async def list_nodes() -> dict: nodes = [] for node in data: - nodes.append({ - "node": node.get("node"), - "status": node.get("status"), - "cpu_usage_percent": round(node.get("cpu", 0) * 100, 2), - "cpu_cores": node.get("maxcpu", 0), - "memory_used": format_bytes(node.get("mem", 0)), - "memory_total": format_bytes(node.get("maxmem", 0)), - "disk_used": format_bytes(node.get("disk", 0)), - "disk_total": format_bytes(node.get("maxdisk", 0)), - "uptime": format_uptime(node.get("uptime", 0)), - "uptime_seconds": node.get("uptime", 0), - }) + nodes.append( + { + "node": node.get("node"), + "status": 
node.get("status"), + "cpu_usage_percent": round(node.get("cpu", 0) * 100, 2), + "cpu_cores": node.get("maxcpu", 0), + "memory_used": format_bytes(node.get("mem", 0)), + "memory_total": format_bytes(node.get("maxmem", 0)), + "disk_used": format_bytes(node.get("disk", 0)), + "disk_total": format_bytes(node.get("maxdisk", 0)), + "uptime": format_uptime(node.get("uptime", 0)), + "uptime_seconds": node.get("uptime", 0), + } + ) return { "status": "success", @@ -54,7 +59,7 @@ async def list_nodes() -> dict: "nodes": nodes, } except Exception as e: - logger.error(f"Failed to list nodes: {e}") + logger.error("Failed to list nodes: %s", e) return format_error_response(e) @@ -71,7 +76,7 @@ async def get_node_status(node: str) -> dict: validate_node_name(node) client = get_client() client.validate_node(node) - logger.info(f"Fetching status for node '{node}'") + logger.info("Fetching status for node '%s'", node) data = await client.api_call(client.api.nodes(node).status.get) return { @@ -80,7 +85,7 @@ async def get_node_status(node: str) -> dict: "data": data, } except Exception as e: - logger.error(f"Failed to get status for node '{node}': {e}") + logger.error("Failed to get status for node '%s': %s", node, e) return format_error_response(e) @@ -97,7 +102,7 @@ async def get_node_services(node: str) -> dict: validate_node_name(node) client = get_client() client.validate_node(node) - logger.info(f"Fetching services for node '{node}'") + logger.info("Fetching services for node '%s'", node) data = await client.api_call(client.api.nodes(node).services.get) return { @@ -107,7 +112,7 @@ async def get_node_services(node: str) -> dict: "services": data, } except Exception as e: - logger.error(f"Failed to get services for node '{node}': {e}") + logger.error("Failed to get services for node '%s': %s", node, e) return format_error_response(e) @@ -124,7 +129,7 @@ async def get_node_network(node: str) -> dict: validate_node_name(node) client = get_client() client.validate_node(node) - 
logger.info(f"Fetching network config for node '{node}'") + logger.info("Fetching network config for node '%s'", node) data = await client.api_call(client.api.nodes(node).network.get) return { @@ -134,7 +139,7 @@ async def get_node_network(node: str) -> dict: "interfaces": data, } except Exception as e: - logger.error(f"Failed to get network config for node '{node}': {e}") + logger.error("Failed to get network config for node '%s': %s", node, e) return format_error_response(e) @@ -151,7 +156,7 @@ async def get_node_storage(node: str) -> dict: validate_node_name(node) client = get_client() client.validate_node(node) - logger.info(f"Fetching storage for node '{node}'") + logger.info("Fetching storage for node '%s'", node) data = await client.api_call(client.api.nodes(node).storage.get) return { @@ -161,7 +166,7 @@ async def get_node_storage(node: str) -> dict: "storage": data, } except Exception as e: - logger.error(f"Failed to get storage for node '{node}': {e}") + logger.error("Failed to get storage for node '%s': %s", node, e) return format_error_response(e) @@ -178,7 +183,7 @@ async def get_node_syslog(node: str, limit: int = 50, since: str | None = None) validate_node_name(node) client = get_client() client.validate_node(node) - logger.info(f"Fetching syslog for node '{node}' (limit={limit}, since={since})") + logger.info("Fetching syslog for node '%s' (limit=%d, since=%s)", node, limit, since) kwargs = {"limit": limit} if since: @@ -193,5 +198,72 @@ async def get_node_syslog(node: str, limit: int = 50, since: str | None = None) "entries": data, } except Exception as e: - logger.error(f"Failed to get syslog for node '{node}': {e}") + logger.error("Failed to get syslog for node '%s': %s", node, e) + return format_error_response(e) + + +@mcp.tool() +async def reboot_node(node: str, confirm: bool = False) -> dict: + """Reboot a Proxmox node. Set confirm=True to execute. + + Args: + node: The node name to reboot. + confirm: Must be True to execute. 
+ """ + try: + validate_node_name(node) + client = get_client() + client.validate_node(node) + if not confirm: + return { + "status": "confirmation_required", + "warning": ( + f"This will REBOOT node '{node}'. " + f"All running VMs/CTs on this node will be affected." + ), + "action": "Call reboot_node again with confirm=True to proceed.", + } + if client.is_dry_run: + return client.dry_run_response("reboot_node", node=node) + logger.warning("Rebooting node '%s'", node) + upid = await client.api_call( + client.api.nodes(node).status.post, command="reboot" + ) + return format_task_result({"data": upid}) + except Exception as e: + logger.error("Failed to reboot node '%s': %s", node, e) + return format_error_response(e) + + +@mcp.tool() +async def shutdown_node(node: str, confirm: bool = False) -> dict: + """Shutdown a Proxmox node. Set confirm=True to execute. + + Args: + node: The node name to shut down. + confirm: Must be True to execute. + """ + try: + validate_node_name(node) + client = get_client() + client.validate_node(node) + if not confirm: + return { + "status": "confirmation_required", + "warning": ( + f"This will SHUT DOWN node '{node}'. " + f"All running VMs/CTs on this node will be stopped. " + f"Physical access may be needed to power it back on." 
+ ), + "action": "Call shutdown_node again with confirm=True to proceed.", + } + if client.is_dry_run: + return client.dry_run_response("shutdown_node", node=node) + logger.warning("Shutting down node '%s'", node) + upid = await client.api_call( + client.api.nodes(node).status.post, command="shutdown" + ) + return format_task_result({"data": upid}) + except Exception as e: + logger.error("Failed to shut down node '%s': %s", node, e) return format_error_response(e) diff --git a/src/proxmox_mcp/tools/storage.py b/src/proxmox_mcp/tools/storage.py index 2a668ee..b24bdd9 100644 --- a/src/proxmox_mcp/tools/storage.py +++ b/src/proxmox_mcp/tools/storage.py @@ -1,7 +1,10 @@ -"""Storage-level read-only tools for Proxmox VE.""" +"""Storage management tools for Proxmox VE.""" import logging + from proxmox_mcp.utils.errors import format_error_response +from proxmox_mcp.utils.formatters import format_task_result +from proxmox_mcp.utils.sanitizers import validate_storage_id from proxmox_mcp.utils.validators import validate_node_name logger = logging.getLogger("proxmox-mcp") @@ -9,11 +12,13 @@ def get_client(): from proxmox_mcp.server import proxmox_client + return proxmox_client def get_mcp(): from proxmox_mcp.server import mcp + return mcp @@ -37,7 +42,7 @@ async def list_storage() -> dict: "storage": data, } except Exception as e: - logger.error(f"Failed to list storage: {e}") + logger.error("Failed to list storage: %s", e) return format_error_response(e) @@ -53,10 +58,8 @@ async def get_storage_status(node: str, storage: str) -> dict: validate_node_name(node) client = get_client() client.validate_node(node) - logger.info(f"Fetching storage status for '{storage}' on node '{node}'") - data = await client.api_call( - client.api.nodes(node).storage(storage).status.get - ) + logger.info("Fetching storage status for '%s' on node '%s'", storage, node) + data = await client.api_call(client.api.nodes(node).storage(storage).status.get) return { "status": "success", @@ -65,14 +68,12 @@ 
async def get_storage_status(node: str, storage: str) -> dict: "data": data, } except Exception as e: - logger.error(f"Failed to get storage status for '{storage}' on '{node}': {e}") + logger.error("Failed to get storage status for '%s' on '%s': %s", storage, node, e) return format_error_response(e) @mcp.tool() -async def list_storage_content( - node: str, storage: str, content_type: str | None = None -) -> dict: +async def list_storage_content(node: str, storage: str, content_type: str | None = None) -> dict: """List the contents of a specific storage on a node. Args: @@ -86,17 +87,15 @@ async def list_storage_content( client = get_client() client.validate_node(node) logger.info( - f"Listing content for storage '{storage}' on node '{node}' " - f"(content_type={content_type})" + "Listing content for storage '%s' on node '%s' (content_type=%s)", + storage, node, content_type, ) kwargs = {} if content_type: kwargs["content"] = content_type - data = await client.api_call( - client.api.nodes(node).storage(storage).content.get, **kwargs - ) + data = await client.api_call(client.api.nodes(node).storage(storage).content.get, **kwargs) return { "status": "success", @@ -107,9 +106,7 @@ async def list_storage_content( "content": data, } except Exception as e: - logger.error( - f"Failed to list content for storage '{storage}' on '{node}': {e}" - ) + logger.error("Failed to list content for storage '%s' on '%s': %s", storage, node, e) return format_error_response(e) @@ -125,7 +122,7 @@ async def get_available_isos(node: str, storage: str = "local") -> dict: validate_node_name(node) client = get_client() client.validate_node(node) - logger.info(f"Fetching available ISOs from '{storage}' on node '{node}'") + logger.info("Fetching available ISOs from '%s' on node '%s'", storage, node) data = await client.api_call( client.api.nodes(node).storage(storage).content.get, content="iso" ) @@ -138,7 +135,7 @@ async def get_available_isos(node: str, storage: str = "local") -> dict: 
"isos": data, } except Exception as e: - logger.error(f"Failed to get ISOs from '{storage}' on '{node}': {e}") + logger.error("Failed to get ISOs from '%s' on '%s': %s", storage, node, e) return format_error_response(e) @@ -154,7 +151,7 @@ async def get_available_templates(node: str, storage: str = "local") -> dict: validate_node_name(node) client = get_client() client.validate_node(node) - logger.info(f"Fetching available templates from '{storage}' on node '{node}'") + logger.info("Fetching available templates from '%s' on node '%s'", storage, node) data = await client.api_call( client.api.nodes(node).storage(storage).content.get, content="vztmpl" ) @@ -167,5 +164,312 @@ async def get_available_templates(node: str, storage: str = "local") -> dict: "templates": data, } except Exception as e: - logger.error(f"Failed to get templates from '{storage}' on '{node}': {e}") + logger.error("Failed to get templates from '%s' on '%s': %s", storage, node, e) + return format_error_response(e) + + +# --------------------------------------------------------------------------- +# Storage configuration management (add / remove) +# --------------------------------------------------------------------------- + +VALID_STORAGE_TYPES = frozenset({"dir", "lvm", "lvmthin", "zfspool", "nfs", "cifs", "btrfs"}) +VALID_CONTENT_TYPES = frozenset( + { + "images", + "rootdir", + "vztmpl", + "backup", + "iso", + "snippets", + "import", + } +) +DEFAULT_STORAGE_IDS = frozenset({"local", "local-lvm"}) + + +@mcp.tool() +async def add_storage( + storage_id: str, + storage_type: str, + content: str, + path: str | None = None, + nodes: str | None = None, + shared: bool = False, + disable: bool = False, + vgname: str | None = None, + thinpool: str | None = None, + pool: str | None = None, + server: str | None = None, + export: str | None = None, + share: str | None = None, + username: str | None = None, + password: str | None = None, + domain: str | None = None, + nfs_options: str | None = None, + 
sparse: bool = True, + mkdir: bool = True, +) -> dict: + """Register a new storage resource in Proxmox VE configuration. + + Makes a mounted filesystem, NFS share, LVM volume group, or other storage + backend available to Proxmox for VM/CT disks, backups, ISOs, etc. + + Args: + storage_id: Unique identifier (e.g., 'local-data'). Alphanumeric, hyphens, underscores. + storage_type: Backend type: 'dir', 'lvm', 'lvmthin', 'zfspool', 'nfs', 'cifs', 'btrfs'. + content: Comma-separated content types (e.g., 'images,iso,vztmpl'). + Valid types: images, rootdir, vztmpl, backup, iso, snippets, import. + path: Filesystem path for type=dir (must already be mounted). + nodes: Comma-separated node names to restrict storage to. Empty = all nodes. + shared: Mark as shared storage (accessible from all nodes). + disable: Create in disabled state. + vgname: LVM volume group name (for type=lvm or lvmthin). + thinpool: LVM thin pool name (for type=lvmthin). + pool: ZFS pool/dataset name (for type=zfspool). + server: Server hostname or IP (for type=nfs or cifs). + export: NFS export path (for type=nfs). + share: CIFS share name (for type=cifs). + username: CIFS username (for type=cifs). + password: CIFS password (for type=cifs). + domain: CIFS domain (for type=cifs). + nfs_options: NFS mount options (for type=nfs). + sparse: Use thin provisioning (for type=zfspool). + mkdir: Create directory if it doesn't exist (for type=dir). + """ + try: + client = get_client() + + # Validate storage_id + validate_storage_id(storage_id) + + # Validate storage type + if storage_type not in VALID_STORAGE_TYPES: + return format_error_response( + Exception( + f"Invalid storage type '{storage_type}'. 
" + f"Valid types: {', '.join(sorted(VALID_STORAGE_TYPES))}" + ) + ) + + # Validate content types + content_list = [c.strip() for c in content.split(",") if c.strip()] + invalid_content = set(content_list) - VALID_CONTENT_TYPES + if invalid_content: + return format_error_response( + Exception( + f"Invalid content types: {', '.join(invalid_content)}. " + f"Valid types: {', '.join(sorted(VALID_CONTENT_TYPES))}" + ) + ) + + # Check for duplicate storage ID + existing = await client.api_call(client.api.storage.get) + existing_ids = {s.get("storage") for s in existing} + if storage_id in existing_ids: + return format_error_response( + Exception(f"Storage ID '{storage_id}' already exists."), + suggestion="Choose a different storage ID or remove the existing one first.", + ) + + # Build API parameters + api_params: dict = { + "storage": storage_id, + "type": storage_type, + "content": ",".join(content_list), + } + + if nodes: + api_params["nodes"] = nodes + if shared: + api_params["shared"] = 1 + if disable: + api_params["disable"] = 1 + + # Type-specific parameters + if storage_type == "dir": + if not path: + return format_error_response(Exception("'path' is required for type=dir.")) + api_params["path"] = path + if mkdir: + api_params["mkdir"] = 1 + + elif storage_type in ("lvm", "lvmthin"): + if not vgname: + return format_error_response( + Exception("'vgname' is required for type=lvm/lvmthin.") + ) + api_params["vgname"] = vgname + if storage_type == "lvmthin" and thinpool: + api_params["thinpool"] = thinpool + + elif storage_type == "zfspool": + if not pool: + return format_error_response(Exception("'pool' is required for type=zfspool.")) + api_params["pool"] = pool + if sparse: + api_params["sparse"] = 1 + + elif storage_type == "nfs": + if not server or not export: + return format_error_response( + Exception("'server' and 'export' are required for type=nfs.") + ) + api_params["server"] = server + api_params["export"] = export + if nfs_options: + 
api_params["options"] = nfs_options + + elif storage_type == "cifs": + if not server or not share: + return format_error_response( + Exception("'server' and 'share' are required for type=cifs.") + ) + api_params["server"] = server + api_params["share"] = share + if username: + api_params["username"] = username + if password: + api_params["password"] = password + if domain: + api_params["domain"] = domain + + if client.is_dry_run: + return client.dry_run_response("add_storage", **api_params) + + logger.warning( + "Adding storage '%s' (type=%s, content=%s)", + storage_id, + storage_type, + content, + ) + + await client.api_call(client.api.storage.post, **api_params) + + return { + "status": "success", + "storage_id": storage_id, + "type": storage_type, + "content": content_list, + "proxmox_config": api_params, + } + except Exception as e: + logger.error("Failed to add storage '%s': %s", storage_id, e) + return format_error_response(e) + + +@mcp.tool() +async def remove_storage( + storage_id: str, + confirm: bool = False, +) -> dict: + """Remove a storage configuration from Proxmox. Does NOT delete data. + + Only unregisters the storage from Proxmox configuration. The underlying + data (files, LVM volumes, ZFS datasets) remains untouched. + + Args: + storage_id: Storage identifier to remove. + confirm: Must be True to proceed. Will fail if storage contains active VM/CT disks. + """ + try: + client = get_client() + validate_storage_id(storage_id) + + # Never remove default storage + if storage_id in DEFAULT_STORAGE_IDS: + return format_error_response( + Exception( + f"Cannot remove default storage '{storage_id}'. " + f"This is a Proxmox default and should not be removed." + ) + ) + + if not confirm: + return { + "status": "confirmation_required", + "warning": ( + f"This will unregister storage '{storage_id}' from Proxmox. " + f"Data on the storage will NOT be deleted, but Proxmox will " + f"no longer manage it." 
+ ), + "action": "Call remove_storage again with confirm=True to proceed.", + "storage_id": storage_id, + } + + if client.is_dry_run: + return client.dry_run_response("remove_storage", storage_id=storage_id) + + logger.warning("Removing storage configuration '%s'", storage_id) + + await client.api_call(client.api.storage(storage_id).delete) + + return { + "status": "success", + "storage_id": storage_id, + "message": ( + f"Storage '{storage_id}' has been removed from Proxmox configuration. " + f"Underlying data has NOT been deleted." + ), + } + except Exception as e: + logger.error("Failed to remove storage '%s': %s", storage_id, e) + return format_error_response(e) + + +VALID_DOWNLOAD_CONTENT = frozenset({"iso", "vztmpl"}) + + +@mcp.tool() +async def download_to_storage( + node: str, + storage: str, + url: str, + content: str, + filename: str, + verify_certificates: bool = True, +) -> dict: + """Download an ISO or container template from a URL to storage. + + Args: + node: The node to download on. + storage: Target storage (e.g. 'local'). + url: URL to download from. + content: Content type - 'iso' or 'vztmpl'. + filename: Filename to save as (e.g. 'ubuntu-24.04.iso'). + verify_certificates: Verify SSL certificates (default True). + """ + try: + validate_node_name(node) + client = get_client() + client.validate_node(node) + if content not in VALID_DOWNLOAD_CONTENT: + return format_error_response( + Exception( + f"Invalid content type '{content}'. " + f"Must be 'iso' or 'vztmpl'." 
+ ) + ) + if client.is_dry_run: + return client.dry_run_response( + "download_to_storage", + node=node, storage=storage, url=url, filename=filename, + ) + kwargs = { + "url": url, + "content": content, + "filename": filename, + } + if not verify_certificates: + kwargs["verify-certificates"] = 0 + logger.info( + "Downloading %s to '%s' on node '%s' from %s", + content, storage, node, url, + ) + upid = await client.api_call( + client.api.nodes(node).storage(storage)("download-url").post, + **kwargs, + ) + return format_task_result({"data": upid}) + except Exception as e: + logger.error("Failed to download to storage '%s': %s", storage, e) return format_error_response(e) diff --git a/src/proxmox_mcp/tools/task.py b/src/proxmox_mcp/tools/task.py index 1f8e3f9..bc2dce2 100644 --- a/src/proxmox_mcp/tools/task.py +++ b/src/proxmox_mcp/tools/task.py @@ -2,7 +2,8 @@ import asyncio import logging -from proxmox_mcp.utils.errors import format_error_response, TaskTimeoutError + +from proxmox_mcp.utils.errors import TaskTimeoutError, format_error_response from proxmox_mcp.utils.validators import validate_node_name logger = logging.getLogger("proxmox-mcp") @@ -10,11 +11,13 @@ def get_client(): from proxmox_mcp.server import proxmox_client + return proxmox_client def get_mcp(): from proxmox_mcp.server import mcp + return mcp @@ -40,12 +43,10 @@ async def list_tasks( if node: validate_node_name(node) client.validate_node(node) - logger.info(f"Listing tasks for node '{node}' (limit={limit})") - data = await client.api_call( - client.api.nodes(node).tasks.get, limit=limit - ) + logger.info("Listing tasks for node '%s' (limit=%d)", node, limit) + data = await client.api_call(client.api.nodes(node).tasks.get, limit=limit) else: - logger.info(f"Listing tasks across all nodes (limit={limit})") + logger.info("Listing tasks across all nodes (limit=%d)", limit) # Get all nodes, then query tasks from each nodes_data = await client.api_call(client.api.nodes.get) data = [] @@ -57,7 +58,7 @@ 
async def list_tasks( ) data.extend(node_tasks) except Exception as e: - logger.warning(f"Failed to get tasks from node '{node_name}': {e}") + logger.warning("Failed to get tasks from node '%s': %s", node_name, e) # Apply status filter if provided if status_filter: @@ -84,7 +85,7 @@ async def list_tasks( "tasks": data, } except Exception as e: - logger.error(f"Failed to list tasks: {e}") + logger.error("Failed to list tasks: %s", e) return format_error_response(e) @@ -100,10 +101,8 @@ async def get_task_status(node: str, upid: str) -> dict: validate_node_name(node) client = get_client() client.validate_node(node) - logger.info(f"Fetching task status for UPID '{upid}' on node '{node}'") - data = await client.api_call( - client.api.nodes(node).tasks(upid).status.get - ) + logger.info("Fetching task status for UPID '%s' on node '%s'", upid, node) + data = await client.api_call(client.api.nodes(node).tasks(upid).status.get) return { "status": "success", @@ -112,7 +111,7 @@ async def get_task_status(node: str, upid: str) -> dict: "task_status": data, } except Exception as e: - logger.error(f"Failed to get task status for '{upid}': {e}") + logger.error("Failed to get task status for '%s': %s", upid, e) return format_error_response(e) @@ -129,10 +128,8 @@ async def get_task_log(node: str, upid: str, limit: int = 100) -> dict: validate_node_name(node) client = get_client() client.validate_node(node) - logger.info(f"Fetching task log for UPID '{upid}' on node '{node}'") - data = await client.api_call( - client.api.nodes(node).tasks(upid).log.get, limit=limit - ) + logger.info("Fetching task log for UPID '%s' on node '%s'", upid, node) + data = await client.api_call(client.api.nodes(node).tasks(upid).log.get, limit=limit) return { "status": "success", @@ -142,14 +139,12 @@ async def get_task_log(node: str, upid: str, limit: int = 100) -> dict: "log": data, } except Exception as e: - logger.error(f"Failed to get task log for '{upid}': {e}") + logger.error("Failed to get task 
log for '%s': %s", upid, e) return format_error_response(e) @mcp.tool() -async def wait_for_task( - node: str, upid: str, timeout: int = 300, poll_interval: int = 5 -) -> dict: +async def wait_for_task(node: str, upid: str, timeout: int = 300, poll_interval: int = 5) -> dict: """Wait for a Proxmox task to complete, polling at regular intervals. Args: @@ -165,19 +160,17 @@ async def wait_for_task( client = get_client() client.validate_node(node) logger.info( - f"Waiting for task '{upid}' on node '{node}' " - f"(timeout={timeout}s, poll_interval={poll_interval}s)" + "Waiting for task '%s' on node '%s' (timeout=%ds, poll_interval=%ds)", + upid, node, timeout, poll_interval, ) elapsed = 0 while elapsed < timeout: - data = await client.api_call( - client.api.nodes(node).tasks(upid).status.get - ) + data = await client.api_call(client.api.nodes(node).tasks(upid).status.get) task_status = data.get("status", "") if task_status != "running": - logger.info(f"Task '{upid}' completed with status: {task_status}") + logger.info("Task '%s' completed with status: %s", upid, task_status) return { "status": "success", "node": node, @@ -189,15 +182,16 @@ async def wait_for_task( await asyncio.sleep(poll_interval) elapsed += poll_interval - raise TaskTimeoutError( - f"Task '{upid}' did not complete within {timeout} seconds." - ) + raise TaskTimeoutError(f"Task '{upid}' did not complete within {timeout} seconds.") except TaskTimeoutError: - logger.warning(f"Task '{upid}' timed out after {timeout}s") + logger.warning("Task '%s' timed out after %ds", upid, timeout) return format_error_response( TaskTimeoutError(f"Task '{upid}' did not complete within {timeout} seconds."), - suggestion="Increase the timeout or check the task status manually with get_task_status.", + suggestion=( + "Increase the timeout or check the task status manually " + "with get_task_status." 
+ ), ) except Exception as e: - logger.error(f"Failed while waiting for task '{upid}': {e}") + logger.error("Failed while waiting for task '%s': %s", upid, e) return format_error_response(e) diff --git a/src/proxmox_mcp/tools/vm.py b/src/proxmox_mcp/tools/vm.py index 9861ade..9d7a8c7 100644 --- a/src/proxmox_mcp/tools/vm.py +++ b/src/proxmox_mcp/tools/vm.py @@ -2,35 +2,62 @@ import json import logging + from proxmox_mcp.utils.errors import format_error_response -from proxmox_mcp.utils.validators import validate_vmid, validate_node_name -from proxmox_mcp.utils.formatters import format_vm_summary, format_task_result +from proxmox_mcp.utils.formatters import format_task_result, format_vm_summary +from proxmox_mcp.utils.validators import validate_node_name, validate_vmid logger = logging.getLogger("proxmox-mcp") +# Allowlist of VM config keys safe to modify via extra_config. +# Keys NOT in this set are rejected to prevent dangerous operations +# (e.g., hookscript for arbitrary code execution, hostpci for PCI passthrough). 
+VM_SAFE_CONFIG_KEYS = frozenset({ + # CPU & Memory + "cores", "sockets", "vcpus", "cpu", "cpulimit", "cpuunits", + "memory", "balloon", "shares", "numa", + # Boot & BIOS + "boot", "bootdisk", "bios", "machine", "ostype", + # Display + "vga", "tablet", + # Cloud-init + "ciuser", "cipassword", "citype", "cicustom", + "ipconfig0", "ipconfig1", "ipconfig2", "ipconfig3", + "nameserver", "searchdomain", "sshkeys", + # Description & Tags + "description", "tags", "name", "onboot", "startup", "protection", + # Agent + "agent", + # Hotplug + "hotplug", + # Disk + "ide0", "ide1", "ide2", "ide3", + "scsi0", "scsi1", "scsi2", "scsi3", + "virtio0", "virtio1", "virtio2", "virtio3", + "sata0", "sata1", "sata2", "sata3", "sata4", "sata5", + "efidisk0", "tpmstate0", + # Network + "net0", "net1", "net2", "net3", + # Misc safe + "kvm", "localtime", "freeze", "template", +}) + def get_client(): from proxmox_mcp.server import proxmox_client + return proxmox_client def get_mcp(): from proxmox_mcp.server import mcp + return mcp mcp = get_mcp() -async def _resolve_node(client, vmid: int, node: str | None) -> str: - """Resolve node for a VMID, auto-detecting if not provided.""" - if node: - validate_node_name(node) - client.validate_node(node) - return node - return await client.resolve_node_for_vmid(vmid) - - @mcp.tool() async def list_vms(node: str | None = None, status_filter: str | None = None) -> dict: """List all QEMU VMs across the cluster or on a specific node. 
@@ -67,7 +94,7 @@ async def get_vm_status(vmid: int, node: str | None = None) -> dict: try: client = get_client() validate_vmid(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) data = await client.api_call(client.api.nodes(node).qemu(vmid).status.current.get) return {"status": "success", "vmid": vmid, "node": node, "data": data} except Exception as e: @@ -85,7 +112,7 @@ async def get_vm_config(vmid: int, node: str | None = None) -> dict: try: client = get_client() validate_vmid(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) data = await client.api_call(client.api.nodes(node).qemu(vmid).config.get) return {"status": "success", "vmid": vmid, "node": node, "config": data} except Exception as e: @@ -93,9 +120,7 @@ async def get_vm_config(vmid: int, node: str | None = None) -> dict: @mcp.tool() -async def get_vm_rrd_data( - vmid: int, node: str | None = None, timeframe: str = "hour" -) -> dict: +async def get_vm_rrd_data(vmid: int, node: str | None = None, timeframe: str = "hour") -> dict: """Get VM performance metrics (CPU, memory, disk, network) over time. 
Args: @@ -106,11 +131,17 @@ async def get_vm_rrd_data( try: client = get_client() validate_vmid(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) data = await client.api_call( client.api.nodes(node).qemu(vmid).rrddata.get, timeframe=timeframe ) - return {"status": "success", "vmid": vmid, "node": node, "timeframe": timeframe, "data": data} + return { + "status": "success", + "vmid": vmid, + "node": node, + "timeframe": timeframe, + "data": data, + } except Exception as e: return format_error_response(e) @@ -127,7 +158,8 @@ async def start_vm(vmid: int, node: str | None = None, timeout: int = 60) -> dic try: client = get_client() validate_vmid(vmid) - node = await _resolve_node(client, vmid, node) + client.check_protected(vmid) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("start_vm", vmid=vmid, node=node) logger.info("Starting VM %d on %s", vmid, node) @@ -149,7 +181,7 @@ async def stop_vm(vmid: int, node: str | None = None) -> dict: client = get_client() validate_vmid(vmid) client.check_protected(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("stop_vm", vmid=vmid, node=node) logger.warning("Hard stopping VM %d on %s", vmid, node) @@ -172,7 +204,7 @@ async def shutdown_vm(vmid: int, node: str | None = None, timeout: int = 120) -> client = get_client() validate_vmid(vmid) client.check_protected(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("shutdown_vm", vmid=vmid, node=node) logger.info("Graceful shutdown of VM %d on %s", vmid, node) @@ -196,7 +228,7 @@ async def reboot_vm(vmid: int, node: str | None = None) -> dict: client = get_client() validate_vmid(vmid) client.check_protected(vmid) - node = await _resolve_node(client, vmid, node) + node = 
await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("reboot_vm", vmid=vmid, node=node) logger.info("Rebooting VM %d on %s", vmid, node) @@ -218,7 +250,7 @@ async def suspend_vm(vmid: int, node: str | None = None) -> dict: client = get_client() validate_vmid(vmid) client.check_protected(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("suspend_vm", vmid=vmid, node=node) logger.info("Suspending VM %d on %s", vmid, node) @@ -240,7 +272,7 @@ async def resume_vm(vmid: int, node: str | None = None) -> dict: client = get_client() validate_vmid(vmid) client.check_protected(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("resume_vm", vmid=vmid, node=node) logger.info("Resuming VM %d on %s", vmid, node) @@ -262,7 +294,7 @@ async def reset_vm(vmid: int, node: str | None = None) -> dict: client = get_client() validate_vmid(vmid) client.check_protected(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("reset_vm", vmid=vmid, node=node) logger.warning("Hard resetting VM %d on %s", vmid, node) @@ -297,7 +329,7 @@ async def clone_vm( client = get_client() validate_vmid(vmid) validate_vmid(newid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("clone_vm", vmid=vmid, newid=newid, name=name, node=node) kwargs = {"newid": newid, "name": name, "full": 1 if full else 0} @@ -327,13 +359,14 @@ async def migrate_vm( try: client = get_client() validate_vmid(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("migrate_vm", vmid=vmid, 
target=target_node, node=node) logger.info("Migrating VM %d from %s to %s (online=%s)", vmid, node, target_node, online) upid = await client.api_call( client.api.nodes(node).qemu(vmid).migrate.post, - target=target_node, online=1 if online else 0, + target=target_node, + online=1 if online else 0, ) return format_task_result({"data": upid}) except Exception as e: @@ -422,7 +455,7 @@ async def delete_vm( client = get_client() validate_vmid(vmid) client.check_protected(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if not confirm: vm_data = await client.api_call(client.api.nodes(node).qemu(vmid).status.current.get) return { @@ -432,7 +465,12 @@ async def delete_vm( f"All disks and data will be destroyed. This cannot be undone." ), "action": "Call delete_vm again with confirm=True to proceed.", - "vm_info": {"vmid": vmid, "name": vm_data.get("name"), "node": node, "status": vm_data.get("status")}, + "vm_info": { + "vmid": vmid, + "name": vm_data.get("name"), + "node": node, + "status": vm_data.get("status"), + }, } if client.is_dry_run: return client.dry_run_response("delete_vm", vmid=vmid, node=node) @@ -448,6 +486,90 @@ async def delete_vm( return format_error_response(e) +@mcp.tool() +async def resize_vm_disk( + vmid: int, + disk: str, + size: str, + node: str | None = None, +) -> dict: + """Resize a VM disk. Can only grow disks, not shrink. + + Args: + vmid: The VM ID. + disk: Disk name (e.g. 'scsi0', 'virtio0', 'ide0', 'sata0'). + size: New absolute size (e.g. '50G') or relative increase (e.g. '+10G'). + node: The node name. Auto-detected if omitted. 
+ """ + try: + client = get_client() + validate_vmid(vmid) + client.check_protected(vmid) + node = await client.resolve_node(vmid, node) + if client.is_dry_run: + return client.dry_run_response( + "resize_vm_disk", vmid=vmid, disk=disk, size=size, node=node + ) + logger.info("Resizing disk '%s' on VM %d to %s", disk, vmid, size) + await client.api_call( + client.api.nodes(node).qemu(vmid).resize.put, disk=disk, size=size + ) + return { + "status": "success", + "vmid": vmid, + "node": node, + "disk": disk, + "size": size, + } + except Exception as e: + return format_error_response(e) + + +@mcp.tool() +async def convert_vm_to_template( + vmid: int, + node: str | None = None, + confirm: bool = False, +) -> dict: + """Convert a VM to a template. This is irreversible. Set confirm=True to execute. + + Args: + vmid: The VM ID to convert. + node: The node name. Auto-detected if omitted. + confirm: Must be True to execute. False returns a confirmation prompt. + """ + try: + client = get_client() + validate_vmid(vmid) + client.check_protected(vmid) + node = await client.resolve_node(vmid, node) + if not confirm: + return { + "status": "confirmation_required", + "warning": ( + f"This will convert VM {vmid} to a template. " + f"This action is IRREVERSIBLE. The VM will no longer be startable." 
+ ), + "action": "Call convert_vm_to_template again with confirm=True.", + } + if client.is_dry_run: + return client.dry_run_response( + "convert_vm_to_template", vmid=vmid, node=node + ) + logger.warning("Converting VM %d to template on %s", vmid, node) + await client.api_call( + client.api.nodes(node).qemu(vmid).template.post + ) + return { + "status": "success", + "vmid": vmid, + "node": node, + "message": f"VM {vmid} has been converted to a template.", + } + except Exception as e: + return format_error_response(e) + + @mcp.tool() async def modify_vm_config( vmid: int, @@ -483,7 +605,7 @@ async def modify_vm_config( client = get_client() validate_vmid(vmid) client.check_protected(vmid) - node = await _resolve_node(client, vmid, node) + node = await client.resolve_node(vmid, node) if client.is_dry_run: return client.dry_run_response("modify_vm_config", vmid=vmid, node=node) kwargs = {} @@ -507,13 +629,21 @@ async def modify_vm_config( kwargs["tags"] = tags if extra_config: extra = json.loads(extra_config) - # Prevent overriding safety-relevant parameters - blocked_keys = {"vmid", "node", "digest"} - extra = {k: v for k, v in extra.items() if k not in blocked_keys} + unsafe_keys = [k for k in extra if k not in VM_SAFE_CONFIG_KEYS] + if unsafe_keys: + return format_error_response( + Exception( + f"Keys not in allowlist: {unsafe_keys}. " + f"Modifying these keys is restricted for safety." 
+ ) + ) kwargs.update(extra) if not kwargs: - return {"status": "error", "error_type": "InvalidParameterError", - "message": "No configuration changes specified."} + return { + "status": "error", + "error_type": "InvalidParameterError", + "message": "No configuration changes specified.", + } logger.info("Modifying VM %d config on %s: %s", vmid, node, list(kwargs.keys())) await client.api_call(client.api.nodes(node).qemu(vmid).config.put, **kwargs) return {"status": "success", "vmid": vmid, "node": node, "changes": list(kwargs.keys())} @@ -524,3 +654,102 @@ async def modify_vm_config( ) except Exception as e: return format_error_response(e) + + +@mcp.tool() +async def set_vm_cloudinit( + vmid: int, + node: str | None = None, + ciuser: str | None = None, + cipassword: str | None = None, + sshkeys: str | None = None, + ipconfig0: str | None = None, + ipconfig1: str | None = None, + nameserver: str | None = None, + searchdomain: str | None = None, +) -> dict: + """Configure cloud-init settings for a VM. + + Args: + vmid: The VM ID. + node: The node name. Auto-detected if omitted. + ciuser: Default user name. + cipassword: Password for the default user. + sshkeys: URL-encoded SSH public keys (newline-separated). + ipconfig0: IP config for first interface (e.g. 'ip=dhcp' or + 'ip=10.0.0.5/24,gw=10.0.0.1'). + ipconfig1: IP config for second interface. + nameserver: DNS server(s), space-separated. + searchdomain: DNS search domain(s), space-separated. 
+ """ + try: + client = get_client() + validate_vmid(vmid) + client.check_protected(vmid) + node = await client.resolve_node(vmid, node) + if client.is_dry_run: + return client.dry_run_response("set_vm_cloudinit", vmid=vmid, node=node) + kwargs: dict = {} + if ciuser is not None: + kwargs["ciuser"] = ciuser + if cipassword is not None: + kwargs["cipassword"] = cipassword + if sshkeys is not None: + kwargs["sshkeys"] = sshkeys + if ipconfig0 is not None: + kwargs["ipconfig0"] = ipconfig0 + if ipconfig1 is not None: + kwargs["ipconfig1"] = ipconfig1 + if nameserver is not None: + kwargs["nameserver"] = nameserver + if searchdomain is not None: + kwargs["searchdomain"] = searchdomain + if not kwargs: + return format_error_response( + Exception("No cloud-init settings specified.") + ) + logger.info("Setting cloud-init on VM %d: %s", vmid, list(kwargs.keys())) + await client.api_call( + client.api.nodes(node).qemu(vmid).config.put, **kwargs + ) + return { + "status": "success", + "vmid": vmid, + "node": node, + "changes": list(kwargs.keys()), + } + except Exception as e: + return format_error_response(e) + + +@mcp.tool() +async def regenerate_cloudinit_image(vmid: int, node: str | None = None) -> dict: + """Regenerate the cloud-init image for a VM. + + Call this after changing cloud-init settings to apply them. + + Args: + vmid: The VM ID. + node: The node name. Auto-detected if omitted. 
+ """ + try: + client = get_client() + validate_vmid(vmid) + client.check_protected(vmid) + node = await client.resolve_node(vmid, node) + if client.is_dry_run: + return client.dry_run_response( + "regenerate_cloudinit_image", vmid=vmid, node=node + ) + logger.info("Regenerating cloud-init image for VM %d on %s", vmid, node) + await client.api_call( + client.api.nodes(node).qemu(vmid).cloudinit.post + ) + return { + "status": "success", + "vmid": vmid, + "node": node, + "message": "Cloud-init image regenerated.", + } + except Exception as e: + return format_error_response(e) diff --git a/src/proxmox_mcp/utils/errors.py b/src/proxmox_mcp/utils/errors.py index b839368..5b31834 100644 --- a/src/proxmox_mcp/utils/errors.py +++ b/src/proxmox_mcp/utils/errors.py @@ -45,6 +45,22 @@ class InvalidParameterError(ProxmoxMCPError): """Invalid parameter value provided.""" +class SSHExecutionError(ProxmoxMCPError): + """SSH command execution failed.""" + + +class SafetyGateError(ProxmoxMCPError): + """A safety gate check prevented the operation.""" + + +class DeviceNotFoundError(ProxmoxMCPError): + """Block device not found on the node.""" + + +class DeviceInUseError(ProxmoxMCPError): + """Block device or partition is currently in use.""" + + def format_error_response(error: Exception, suggestion: str | None = None) -> dict: """Format any exception into a structured error response dict.""" result = { diff --git a/src/proxmox_mcp/utils/formatters.py b/src/proxmox_mcp/utils/formatters.py index defb44e..fde6290 100644 --- a/src/proxmox_mcp/utils/formatters.py +++ b/src/proxmox_mcp/utils/formatters.py @@ -12,7 +12,7 @@ def format_vm_summary(vm_data: dict) -> dict: "cpu_cores": vm_data.get("maxcpu", 0), "memory_mb": vm_data.get("maxmem", 0) // (1024 * 1024), "memory_used_mb": vm_data.get("mem", 0) // (1024 * 1024), - "disk_gb": vm_data.get("maxdisk", 0) // (1024 ** 3), + "disk_gb": vm_data.get("maxdisk", 0) // (1024**3), "uptime_seconds": vm_data.get("uptime", 0), 
"cpu_usage_percent": round(vm_data.get("cpu", 0) * 100, 2), "tags": vm_data.get("tags", "").split(";") if vm_data.get("tags") else [], @@ -30,7 +30,7 @@ def format_container_summary(ct_data: dict) -> dict: "cpu_cores": ct_data.get("maxcpu", 0), "memory_mb": ct_data.get("maxmem", 0) // (1024 * 1024), "memory_used_mb": ct_data.get("mem", 0) // (1024 * 1024), - "disk_gb": ct_data.get("maxdisk", 0) // (1024 ** 3), + "disk_gb": ct_data.get("maxdisk", 0) // (1024**3), "uptime_seconds": ct_data.get("uptime", 0), "cpu_usage_percent": round(ct_data.get("cpu", 0) * 100, 2), "tags": ct_data.get("tags", "").split(";") if ct_data.get("tags") else [], @@ -59,5 +59,8 @@ def format_task_result(task_data: dict) -> dict: return { "task_upid": task_data.get("upid") or task_data.get("data"), "status": "submitted", - "message": "Task submitted successfully. Use get_task_status with the UPID to track progress.", + "message": ( + "Task submitted successfully. " + "Use get_task_status with the UPID to track progress." 
"""Input sanitization and validation for disk management tools.

Every value validated here may later be interpolated into a shell command
executed over SSH on a Proxmox node, so validation is allowlist-based and
deliberately strict: anything not matching a known-safe pattern is rejected
with :class:`InvalidParameterError`.
"""

import re

from proxmox_mcp.utils.errors import InvalidParameterError

# Strict patterns for shell-interpolated values
# Whole disks: /dev/sda, /dev/vdb, /dev/nvme0n1 (NOT /dev/sdb1, /dev/nvme0n1p1)
DEVICE_PATH_RE = re.compile(r"^/dev/(?:[a-z]+|nvme\d+n\d+)$")
# Partitions: /dev/sdb1, /dev/nvme0n1p1, or whole disks
PARTITION_PATH_RE = re.compile(r"^/dev/(?:[a-z]+\d*|nvme\d+n\d+(?:p\d+)?)$")
# Mount points must live under the conventional data-mount roots.
MOUNT_PATH_RE = re.compile(r"^/(mnt|srv|media)/[a-zA-Z0-9_][a-zA-Z0-9_/\-]*$")
STORAGE_ID_RE = re.compile(r"^[a-zA-Z][a-zA-Z0-9_\-]{0,63}$")
LABEL_RE = re.compile(r"^[a-zA-Z0-9_\-]{0,16}$")
# Coarse character allowlist for free-form option strings (mkfs flags).
SAFE_OPTION_RE = re.compile(r"^[a-zA-Z0-9_=,.\- ]+$")

# Shell metacharacters that must never appear in parameters
SHELL_METACHAR_RE = re.compile(r"[;|&$`\\'\"\(\)\{\}<>!~#\n\r]")

# Allowed mount options (validated individually)
ALLOWED_MOUNT_OPTIONS = frozenset(
    {
        "defaults",
        "noatime",
        "relatime",
        "noexec",
        "nosuid",
        "nodev",
        "ro",
        "rw",
        "nofail",
        "discard",
        "barrier=0",
        "barrier=1",
        "data=ordered",
        "data=writeback",
        "data=journal",
        "errors=remount-ro",
        "errors=continue",
        "x-systemd.automount",
    }
)

# Allowed mkfs options (flag + optional value)
ALLOWED_MKFS_FLAGS = frozenset(
    {
        "-m",
        "-L",
        "-n",
        "-b",
        "-i",
        "-N",
        "-O",
        "-T",
        "-E",
        "-f",
    }
)

# System-critical mount paths that must never be targets.
# NOTE(review): MOUNT_PATH_RE already restricts targets to /mnt, /srv and
# /media, so this set is defense in depth rather than the primary gate.
CRITICAL_PATHS = frozenset(
    {
        "/",
        "/etc",
        "/var",
        "/home",
        "/usr",
        "/boot",
        "/tmp",
        "/root",
        "/opt",
        "/lib",
        "/bin",
        "/sbin",
        "/proc",
        "/sys",
        "/dev",
        "/run",
        "/boot/efi",
        "/var/log",
        "/var/lib",
    }
)

UUID_RE = re.compile(
    r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"
)

SNAPNAME_RE = re.compile(r"^[a-zA-Z][a-zA-Z0-9_\-\.]{0,39}$")

VALID_FILESYSTEMS = frozenset({"ext4", "xfs", "vfat"})
VALID_PARTITION_TABLES = frozenset({"gpt", "msdos"})


def check_shell_injection(value: str, param_name: str) -> None:
    """Reject any value containing shell metacharacters.

    Args:
        value: The raw parameter value to inspect.
        param_name: Name used in the error message.

    Raises:
        InvalidParameterError: If a forbidden character is present.
    """
    if SHELL_METACHAR_RE.search(value):
        raise InvalidParameterError(
            f"Parameter '{param_name}' contains forbidden characters. "
            f"Shell metacharacters are not allowed."
        )


def validate_device_path(device: str, *, allow_partition: bool = False) -> None:
    """Validate a block device path.

    Args:
        device: Device path like /dev/sdb or /dev/sdb1.
        allow_partition: If True, allow partition suffixes (e.g., /dev/sdb1).

    Raises:
        InvalidParameterError: If the path is not a recognized device form.
    """
    check_shell_injection(device, "device")
    pattern = PARTITION_PATH_RE if allow_partition else DEVICE_PATH_RE
    if not pattern.match(device):
        if allow_partition:
            raise InvalidParameterError(
                f"Device path '{device}' is invalid. "
                f"Must match /dev/[name] or /dev/[name][number] (e.g., /dev/sdb, /dev/sdb1)."
            )
        raise InvalidParameterError(
            f"Device path '{device}' is invalid. "
            f"Must be a whole disk like /dev/sdb (no partition numbers)."
        )


def validate_mount_path(path: str) -> None:
    """Validate a mount point path is safe.

    Raises:
        InvalidParameterError: If the path contains traversal, is outside the
            allowed roots, or names a critical system path.
    """
    check_shell_injection(path, "mount_path")

    # Explicit traversal check; the allowlist regex below would also reject
    # ".." but this yields a clearer error.
    if ".." in path:
        raise InvalidParameterError(
            f"Mount path '{path}' contains path traversal (..) which is not allowed."
        )

    if not MOUNT_PATH_RE.match(path):
        raise InvalidParameterError(
            f"Mount path '{path}' is invalid. "
            f"Must be under /mnt/, /srv/, or /media/ "
            f"(e.g., /mnt/data, /srv/storage)."
        )

    # Check against critical system paths (defense in depth).
    normalized = path.rstrip("/")
    if normalized in CRITICAL_PATHS:
        raise InvalidParameterError(
            f"Mount path '{path}' is a critical system path and cannot be used."
        )


def validate_storage_id(storage_id: str) -> None:
    """Validate a Proxmox storage identifier.

    Raises:
        InvalidParameterError: If the identifier does not match STORAGE_ID_RE.
    """
    check_shell_injection(storage_id, "storage_id")
    if not STORAGE_ID_RE.match(storage_id):
        raise InvalidParameterError(
            f"Storage ID '{storage_id}' is invalid. "
            f"Must start with a letter, contain only alphanumeric/hyphens/underscores, "
            f"and be at most 64 characters."
        )


def validate_label(label: str) -> None:
    """Validate a filesystem label.

    Raises:
        InvalidParameterError: If the label does not match LABEL_RE.
    """
    check_shell_injection(label, "label")
    if not LABEL_RE.match(label):
        raise InvalidParameterError(
            f"Label '{label}' is invalid. "
            f"Must be alphanumeric with hyphens/underscores, max 16 characters."
        )


def validate_mount_options(options: str) -> None:
    """Validate mount options against the allowlist.

    Options are split on commas and each one must either be ``commit=N`` or
    appear in ALLOWED_MOUNT_OPTIONS.

    Raises:
        InvalidParameterError: If any option is not allowlisted.
    """
    check_shell_injection(options, "mount_options")
    parts = [opt.strip() for opt in options.split(",") if opt.strip()]

    for opt in parts:
        # Handle commit=N pattern (numeric commit interval).
        if re.match(r"^commit=\d+$", opt):
            continue
        if opt not in ALLOWED_MOUNT_OPTIONS:
            raise InvalidParameterError(
                f"Mount option '{opt}' is not allowed. "
                f"Allowed options: {', '.join(sorted(ALLOWED_MOUNT_OPTIONS))}"
            )


def validate_mkfs_options(options: str) -> None:
    """Validate mkfs options against the allowlist.

    First the whole string is checked against SAFE_OPTION_RE, then each
    space-separated flag token must start with an allowlisted single-letter
    flag from ALLOWED_MKFS_FLAGS.

    Raises:
        InvalidParameterError: If a character or flag is not allowed.
    """
    check_shell_injection(options, "options")
    if not SAFE_OPTION_RE.match(options):
        # Fix: the message now matches SAFE_OPTION_RE, which also permits
        # underscore, comma, and space (the old text omitted them).
        raise InvalidParameterError(
            "mkfs options contain invalid characters. "
            "Only alphanumeric characters, spaces, and = , . - _ are allowed."
        )
    # Split by spaces and check each flag
    tokens = options.split()
    for token in tokens:
        if token.startswith("-"):
            # Extract the flag part (e.g., "-m" from "-m 1")
            flag = re.match(r"^(-[a-zA-Z])", token)
            if not flag or flag.group(1) not in ALLOWED_MKFS_FLAGS:
                raise InvalidParameterError(f"mkfs flag '{token}' is not allowed.")


def validate_filesystem(filesystem: str) -> None:
    """Validate filesystem type.

    Raises:
        InvalidParameterError: If the type is not in VALID_FILESYSTEMS.
    """
    if filesystem not in VALID_FILESYSTEMS:
        raise InvalidParameterError(
            f"Filesystem '{filesystem}' is not supported. "
            f"Supported: {', '.join(sorted(VALID_FILESYSTEMS))}"
        )


def validate_partition_table(table_type: str) -> None:
    """Validate partition table type.

    Raises:
        InvalidParameterError: If the type is not in VALID_PARTITION_TABLES.
    """
    if table_type not in VALID_PARTITION_TABLES:
        raise InvalidParameterError(
            f"Partition table type '{table_type}' is not supported. "
            f"Supported: {', '.join(sorted(VALID_PARTITION_TABLES))}"
        )


def validate_snapname(name: str) -> None:
    """Validate snapshot name matches Proxmox requirements.

    Raises:
        InvalidParameterError: If the name does not match SNAPNAME_RE.
    """
    if not SNAPNAME_RE.match(name):
        raise InvalidParameterError(
            f"Invalid snapshot name '{name}'. Must start with a letter, "
            "contain only [a-zA-Z0-9_-.], and be 1-40 characters."
        )


def validate_uuid(uuid_str: str) -> str:
    """Validate a UUID string format. Returns the validated UUID.

    Raises:
        InvalidParameterError: If the string is not a canonical 8-4-4-4-12 UUID.
    """
    if not UUID_RE.match(uuid_str):
        raise InvalidParameterError(
            f"Invalid UUID format: {uuid_str!r}. "
            f"Expected format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
        )
    return uuid_str
{"name": "snap1", "description": "test", "snaptime": 1700000000}, - ]) + + mock_client.api_call = AsyncMock( + return_value=[ + {"name": "snap1", "description": "test", "snaptime": 1700000000}, + ] + ) result = await list_snapshots(vmid=100) assert result["status"] == "success" @@ -37,6 +43,7 @@ async def test_list_snapshots(mock_client): @pytest.mark.asyncio async def test_rollback_snapshot_requires_confirm(mock_client): from proxmox_mcp.tools.backup import rollback_snapshot + result = await rollback_snapshot(vmid=100, snapname="snap1") assert result["status"] == "confirmation_required" @@ -44,6 +51,7 @@ async def test_rollback_snapshot_requires_confirm(mock_client): @pytest.mark.asyncio async def test_create_backup(mock_client): from proxmox_mcp.tools.backup import create_backup + mock_client.api_call = AsyncMock(return_value="UPID:pve1:vzdump") result = await create_backup(vmid=100) assert result["status"] == "submitted" @@ -52,8 +60,74 @@ async def test_create_backup(mock_client): @pytest.mark.asyncio async def test_list_backups(mock_client): from proxmox_mcp.tools.backup import list_backups - mock_client.api_call = AsyncMock(return_value=[ - {"volid": "local:backup/vm-100.vma.zst", "size": 1073741824}, - ]) + + mock_client.api_call = AsyncMock( + return_value=[ + {"volid": "local:backup/vm-100.vma.zst", "size": 1073741824}, + ] + ) result = await list_backups(node="pve1") assert result["status"] == "success" + + +# --- list_backup_jobs --- + + +@pytest.mark.asyncio +async def test_list_backup_jobs(mock_client): + from proxmox_mcp.tools.backup import list_backup_jobs + + mock_client.api_call = AsyncMock( + return_value=[ + {"id": "backup-001", "schedule": "0 2 * * *", "storage": "local"}, + ] + ) + result = await list_backup_jobs() + assert result["status"] == "success" + assert result["count"] == 1 + + +# --- create_backup_job --- + + +@pytest.mark.asyncio +async def test_create_backup_job(mock_client): + from proxmox_mcp.tools.backup import create_backup_job 
+ + mock_client.api_call = AsyncMock(return_value=None) + result = await create_backup_job( + storage="local", schedule="0 2 * * *", vmid="100,101" + ) + assert result["status"] == "success" + assert result["schedule"] == "0 2 * * *" + + +@pytest.mark.asyncio +async def test_create_backup_job_dry_run(mock_client): + from proxmox_mcp.tools.backup import create_backup_job + + mock_client.is_dry_run = True + mock_client.dry_run_response.return_value = {"status": "dry_run"} + result = await create_backup_job(storage="local", schedule="0 3 * * *") + assert result["status"] == "dry_run" + + +# --- delete_backup_job --- + + +@pytest.mark.asyncio +async def test_delete_backup_job_requires_confirm(mock_client): + from proxmox_mcp.tools.backup import delete_backup_job + + result = await delete_backup_job(job_id="backup-001") + assert result["status"] == "confirmation_required" + + +@pytest.mark.asyncio +async def test_delete_backup_job_confirmed(mock_client): + from proxmox_mcp.tools.backup import delete_backup_job + + mock_client.api_call = AsyncMock(return_value=None) + result = await delete_backup_job(job_id="backup-001", confirm=True) + assert result["status"] == "success" + assert result["job_id"] == "backup-001" diff --git a/tests/test_client.py b/tests/test_client.py index 5ead53c..d30b561 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,11 +1,14 @@ -import pytest from unittest.mock import MagicMock, patch + +import pytest + from proxmox_mcp.client import ProxmoxClient from proxmox_mcp.config import ProxmoxConfig from proxmox_mcp.utils.errors import ( - VMNotFoundError, - ProtectedResourceError, + InvalidParameterError, NodeNotAllowedError, + ProtectedResourceError, + VMNotFoundError, ) @@ -78,6 +81,37 @@ def test_dry_run_response(client): assert result["params"]["vmid"] == 100 +@pytest.mark.asyncio +async def test_resolve_node_with_explicit_node(client): + node = await client.resolve_node(100, "pve1") + assert node == "pve1" + + 
@pytest.mark.asyncio
async def test_resolve_node_auto_detect(client):
    # The client should look the VM up in cluster resources when no node is given.
    client._api.cluster.resources.get.return_value = [
        {"vmid": 100, "node": "pve2", "type": "qemu"},
    ]
    assert await client.resolve_node(100, None) == "pve2"


@pytest.mark.asyncio
async def test_resolve_node_invalid_name(client):
    # Node names with spaces/punctuation must be rejected before any API call.
    with pytest.raises(InvalidParameterError):
        await client.resolve_node(100, "bad node!")


@pytest.mark.asyncio
async def test_resolve_node_disallowed_node(mock_config, monkeypatch):
    monkeypatch.setenv("PROXMOX_ALLOWED_NODES", "pve1,pve2")
    cfg = ProxmoxConfig()
    with patch("proxmox_mcp.client.ProxmoxAPI"):
        restricted = ProxmoxClient(cfg)
    # pve3 is outside the allowlist and must be refused.
    with pytest.raises(NodeNotAllowedError):
        await restricted.resolve_node(100, "pve3")
# --- create_pool ---


@pytest.mark.asyncio
async def test_create_pool(mock_client):
    from proxmox_mcp.tools.cluster import create_pool

    mock_client.is_dry_run = False
    mock_client.api_call.return_value = None

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await create_pool("dev-team", comment="Development team pool")

    assert result["poolid"] == "dev-team"
    assert result["status"] == "success"


@pytest.mark.asyncio
async def test_create_pool_dry_run(mock_client):
    from proxmox_mcp.tools.cluster import create_pool

    mock_client.is_dry_run = True
    mock_client.dry_run_response.return_value = {"status": "dry_run"}

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await create_pool("dev-team")

    assert result["status"] == "dry_run"


# --- modify_pool ---


@pytest.mark.asyncio
async def test_modify_pool_add_vms(mock_client):
    from proxmox_mcp.tools.cluster import modify_pool

    mock_client.is_dry_run = False
    mock_client.api_call.return_value = None

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await modify_pool("dev-team", vms="100,101")

    assert "vms" in result["changes"]
    assert result["status"] == "success"


@pytest.mark.asyncio
async def test_modify_pool_no_changes(mock_client):
    from proxmox_mcp.tools.cluster import modify_pool

    mock_client.is_dry_run = False

    # Calling with no fields to change should be rejected.
    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await modify_pool("dev-team")

    assert result["status"] == "error"


# --- delete_pool ---


@pytest.mark.asyncio
async def test_delete_pool_requires_confirm(mock_client):
    from proxmox_mcp.tools.cluster import delete_pool

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await delete_pool("dev-team")

    assert result["status"] == "confirmation_required"


@pytest.mark.asyncio
async def test_delete_pool_confirmed(mock_client):
    from proxmox_mcp.tools.cluster import delete_pool

    mock_client.is_dry_run = False
    mock_client.api_call.return_value = None

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await delete_pool("dev-team", confirm=True)

    assert result["poolid"] == "dev-team"
    assert result["status"] == "success"


@pytest.mark.asyncio
async def test_delete_pool_error(mock_client):
    from proxmox_mcp.tools.cluster import delete_pool

    mock_client.is_dry_run = False
    mock_client.api_call.side_effect = Exception("pool not empty")

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await delete_pool("dev-team", confirm=True)

    assert result["status"] == "error"


# --- list_users ---


@pytest.mark.asyncio
async def test_list_users(mock_client):
    from proxmox_mcp.tools.cluster import list_users

    mock_client.api_call.return_value = [
        {"userid": "root@pam", "enable": 1, "email": "root@example.com"},
        {"userid": "admin@pve", "enable": 1},
    ]

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await list_users()

    assert result["count"] == 2
    assert result["status"] == "success"


# --- create_user ---


@pytest.mark.asyncio
async def test_create_user(mock_client):
    from proxmox_mcp.tools.cluster import create_user

    mock_client.is_dry_run = False
    mock_client.api_call.return_value = None

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await create_user(
            userid="john@pve", password="secret", email="john@example.com"
        )

    assert result["userid"] == "john@pve"
    assert result["status"] == "success"


# --- delete_user ---


@pytest.mark.asyncio
async def test_delete_user_requires_confirm(mock_client):
    from proxmox_mcp.tools.cluster import delete_user

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await delete_user("john@pve")

    assert result["status"] == "confirmation_required"


@pytest.mark.asyncio
async def test_delete_user_confirmed(mock_client):
    from proxmox_mcp.tools.cluster import delete_user

    mock_client.is_dry_run = False
    mock_client.api_call.return_value = None

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await delete_user("john@pve", confirm=True)

    assert result["status"] == "success"


# --- list_roles ---


@pytest.mark.asyncio
async def test_list_roles(mock_client):
    from proxmox_mcp.tools.cluster import list_roles

    mock_client.api_call.return_value = [
        {"roleid": "PVEAdmin", "privs": "Sys.Audit,Sys.Modify"},
        {"roleid": "PVEVMUser", "privs": "VM.Audit,VM.Console"},
    ]

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await list_roles()

    assert result["count"] == 2
    assert result["status"] == "success"


# --- set_user_permission ---


@pytest.mark.asyncio
async def test_set_user_permission(mock_client):
    from proxmox_mcp.tools.cluster import set_user_permission

    mock_client.is_dry_run = False
    mock_client.api_call.return_value = None

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await set_user_permission(
            path="/vms/100", roles="PVEVMUser", users="john@pve"
        )

    assert result["path"] == "/vms/100"
    assert result["status"] == "success"
@pytest.mark.asyncio
async def test_set_user_permission_requires_users_or_groups(mock_client):
    from proxmox_mcp.tools.cluster import set_user_permission

    # Neither users nor groups supplied -> the tool must refuse.
    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await set_user_permission(path="/", roles="PVEAdmin")

    assert result["status"] == "error"


# --- list_ha_resources ---


@pytest.mark.asyncio
async def test_list_ha_resources(mock_client):
    from proxmox_mcp.tools.cluster import list_ha_resources

    mock_client.api_call.return_value = [
        {"sid": "vm:100", "state": "started", "group": "ha-group1"},
    ]

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await list_ha_resources()

    assert result["count"] == 1
    assert result["status"] == "success"


# --- create_ha_resource ---


@pytest.mark.asyncio
async def test_create_ha_resource(mock_client):
    from proxmox_mcp.tools.cluster import create_ha_resource

    mock_client.is_dry_run = False
    mock_client.api_call.return_value = None

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await create_ha_resource(sid="vm:100", state="started")

    assert result["sid"] == "vm:100"
    assert result["status"] == "success"


@pytest.mark.asyncio
async def test_create_ha_resource_invalid_state(mock_client):
    from proxmox_mcp.tools.cluster import create_ha_resource

    # "running" is not a valid HA state name -> error response.
    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await create_ha_resource(sid="vm:100", state="running")

    assert result["status"] == "error"


# --- modify_ha_resource ---


@pytest.mark.asyncio
async def test_modify_ha_resource(mock_client):
    from proxmox_mcp.tools.cluster import modify_ha_resource

    mock_client.is_dry_run = False
    mock_client.api_call.return_value = None

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await modify_ha_resource(sid="vm:100", state="stopped")

    assert "state" in result["changes"]
    assert result["status"] == "success"


@pytest.mark.asyncio
async def test_modify_ha_resource_no_changes(mock_client):
    from proxmox_mcp.tools.cluster import modify_ha_resource

    mock_client.is_dry_run = False

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await modify_ha_resource(sid="vm:100")

    assert result["status"] == "error"


# --- delete_ha_resource ---


@pytest.mark.asyncio
async def test_delete_ha_resource_requires_confirm(mock_client):
    from proxmox_mcp.tools.cluster import delete_ha_resource

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await delete_ha_resource(sid="vm:100")

    assert result["status"] == "confirmation_required"


@pytest.mark.asyncio
async def test_delete_ha_resource_confirmed(mock_client):
    from proxmox_mcp.tools.cluster import delete_ha_resource

    mock_client.is_dry_run = False
    mock_client.api_call.return_value = None

    with patch("proxmox_mcp.tools.cluster.get_client", return_value=mock_client):
        result = await delete_ha_resource(sid="vm:100", confirm=True)

    assert result["sid"] == "vm:100"
    assert result["status"] == "success"
for container tools.""" +from unittest.mock import AsyncMock, MagicMock, patch + import pytest -from unittest.mock import MagicMock, patch, AsyncMock @pytest.fixture @@ -13,6 +14,7 @@ def mock_client(): client.config.PROXMOX_DRY_RUN = False client.is_dry_run = False client.resolve_node_for_vmid = AsyncMock(return_value="pve1") + client.resolve_node = AsyncMock(return_value="pve1") mock_get.return_value = client yield client @@ -20,11 +22,24 @@ def mock_client(): @pytest.mark.asyncio async def test_list_containers(mock_client): from proxmox_mcp.tools.container import list_containers - mock_client.api_call = AsyncMock(return_value=[ - {"vmid": 200, "name": "ct1", "status": "running", "node": "pve1", - "type": "lxc", "maxcpu": 1, "maxmem": 536870912, "mem": 268435456, - "maxdisk": 8589934592, "uptime": 3600, "cpu": 0.02}, - ]) + + mock_client.api_call = AsyncMock( + return_value=[ + { + "vmid": 200, + "name": "ct1", + "status": "running", + "node": "pve1", + "type": "lxc", + "maxcpu": 1, + "maxmem": 536870912, + "mem": 268435456, + "maxdisk": 8589934592, + "uptime": 3600, + "cpu": 0.02, + }, + ] + ) result = await list_containers() assert result["status"] == "success" assert result["containers"][0]["type"] == "lxc" @@ -33,6 +48,7 @@ async def test_list_containers(mock_client): @pytest.mark.asyncio async def test_get_container_status(mock_client): from proxmox_mcp.tools.container import get_container_status + mock_client.api_call = AsyncMock(return_value={"status": "running", "vmid": 200}) result = await get_container_status(vmid=200) assert result["status"] == "success" @@ -41,6 +57,7 @@ async def test_get_container_status(mock_client): @pytest.mark.asyncio async def test_start_container(mock_client): from proxmox_mcp.tools.container import start_container + mock_client.api_call = AsyncMock(return_value="UPID:pve1:00001:start") result = await start_container(vmid=200) assert result["status"] == "submitted" @@ -50,6 +67,7 @@ async def test_start_container(mock_client): 
async def test_stop_container_protected(mock_client): from proxmox_mcp.tools.container import stop_container from proxmox_mcp.utils.errors import ProtectedResourceError + mock_client.check_protected.side_effect = ProtectedResourceError("protected") result = await stop_container(vmid=200) assert result["status"] == "error" @@ -58,6 +76,7 @@ async def test_stop_container_protected(mock_client): @pytest.mark.asyncio async def test_delete_container_requires_confirm(mock_client): from proxmox_mcp.tools.container import delete_container + mock_client.api_call = AsyncMock( return_value={"status": "stopped", "vmid": 200, "name": "test-ct"} ) @@ -68,8 +87,31 @@ async def test_delete_container_requires_confirm(mock_client): @pytest.mark.asyncio async def test_create_container(mock_client): from proxmox_mcp.tools.container import create_container + mock_client.api_call = AsyncMock(return_value="UPID:pve1:00006:create") result = await create_container( node="pve1", ostemplate="local:vztmpl/ubuntu-22.04.tar.zst", hostname="test-ct" ) assert result["status"] == "submitted" + + +@pytest.mark.asyncio +async def test_modify_container_config_blocks_hookscript(mock_client): + from proxmox_mcp.tools.container import modify_container_config + + result = await modify_container_config( + vmid=200, extra_config='{"hookscript": "local:snippets/evil.sh"}' + ) + assert result["status"] == "error" + assert "hookscript" in result["message"] + + +@pytest.mark.asyncio +async def test_modify_container_config_allows_safe_keys(mock_client): + from proxmox_mcp.tools.container import modify_container_config + + mock_client.api_call = AsyncMock(return_value=None) + result = await modify_container_config( + vmid=200, extra_config='{"memory": 1024, "cores": 2}' + ) + assert result["status"] == "success" diff --git a/tests/test_disk_tools.py b/tests/test_disk_tools.py new file mode 100644 index 0000000..8370f7f --- /dev/null +++ b/tests/test_disk_tools.py @@ -0,0 +1,717 @@ +"""Tests for physical disk 
management tools.""" + +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from proxmox_mcp.ssh import SSHResult + + +@pytest.fixture +def mock_client(): + """Create a mock ProxmoxClient.""" + client = MagicMock() + client.api_call = AsyncMock() + client.validate_node = MagicMock() + client.is_dry_run = False + return client + + +@pytest.fixture +def mock_ssh(): + """Create a mock SSHExecutor.""" + ssh = MagicMock() + ssh.execute = AsyncMock() + return ssh + + +# --------------------------------------------------------------------------- +# list_physical_disks +# --------------------------------------------------------------------------- + + +class TestListPhysicalDisks: + @pytest.mark.asyncio + async def test_list_disks_success(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import list_physical_disks + + mock_client.api_call.return_value = [ + { + "devpath": "/dev/sda", + "model": "Samsung SSD 870", + "serial": "S5XXNJ0R123456", + "size": 500107862016, + "type": "sata", + "rpm": 0, + "health": "PASSED", + "gpt": 1, + "used": "LVM", + }, + { + "devpath": "/dev/sdb", + "model": "WD Red 10TB", + "serial": "WD-WMC1T0123456", + "size": 10000831348736, + "type": "sata", + "rpm": 5400, + "health": "PASSED", + "gpt": 0, + "used": "", + }, + ] + + # SSH lsblk returns empty (no enrichment needed for basic test) + mock_ssh.execute.return_value = SSHResult( + exit_code=0, stdout='{"blockdevices": []}', stderr="" + ) + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await list_physical_disks("pve1") + + assert result["status"] == "success" + assert result["count"] == 2 + assert result["disks"][0]["device"] == "/dev/sda" + assert result["disks"][0]["in_use"] is True + assert result["disks"][1]["device"] == "/dev/sdb" + assert result["disks"][1]["in_use"] is False + + @pytest.mark.asyncio + async def 
test_list_disks_filter_unused(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import list_physical_disks + + mock_client.api_call.return_value = [ + {"devpath": "/dev/sda", "size": 500000, "used": "LVM"}, + {"devpath": "/dev/sdb", "size": 10000000, "used": ""}, + ] + mock_ssh.execute.return_value = SSHResult( + exit_code=0, stdout='{"blockdevices": []}', stderr="" + ) + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await list_physical_disks("pve1", filter_unused=True) + + assert result["status"] == "success" + assert result["count"] == 1 + assert result["disks"][0]["device"] == "/dev/sdb" + + @pytest.mark.asyncio + async def test_list_disks_invalid_node(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import list_physical_disks + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await list_physical_disks("") + + assert result["status"] == "error" + + @pytest.mark.asyncio + async def test_list_disks_api_error(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import list_physical_disks + + mock_client.api_call.side_effect = Exception("API unreachable") + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await list_physical_disks("pve1") + + assert result["status"] == "error" + + +# --------------------------------------------------------------------------- +# partition_disk — safety gates +# --------------------------------------------------------------------------- + + +class TestPartitionDisk: + @pytest.mark.asyncio + async def test_requires_confirm(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import partition_disk + + with ( + patch("proxmox_mcp.tools.disk.get_client", 
return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await partition_disk("pve1", "/dev/sdb", confirm_destructive=False) + + assert result["status"] == "confirmation_required" + assert "DESTROY" in result["warning"] + + @pytest.mark.asyncio + async def test_rejects_partition_path(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import partition_disk + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await partition_disk("pve1", "/dev/sdb1", confirm_destructive=True) + + assert result["status"] == "error" + assert "whole disk" in result["message"] or "invalid" in result["message"].lower() + + @pytest.mark.asyncio + async def test_rejects_boot_disk(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import partition_disk + + # Device exists + mock_ssh.execute.side_effect = [ + SSHResult(exit_code=0, stdout="exists", stderr=""), # test -b + SSHResult(exit_code=0, stdout="/\n/boot\n", stderr=""), # lsblk mounts + ] + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await partition_disk("pve1", "/dev/sda", confirm_destructive=True) + + assert result["status"] == "error" + assert "boot" in result["message"].lower() + + @pytest.mark.asyncio + async def test_rejects_mounted_device(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import partition_disk + + mock_ssh.execute.side_effect = [ + SSHResult(exit_code=0, stdout="exists", stderr=""), # test -b + SSHResult(exit_code=0, stdout="", stderr=""), # lsblk mounts (no boot) + SSHResult( # findmnt shows mounted + exit_code=0, + stdout="/dev/sdb1 /mnt/data", + stderr="", + ), + ] + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", 
return_value=mock_ssh), + ): + result = await partition_disk("pve1", "/dev/sdb", confirm_destructive=True) + + assert result["status"] == "error" + assert "mounted" in result["message"].lower() or "in use" in result["message"].lower() + + @pytest.mark.asyncio + async def test_rejects_device_not_found(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import partition_disk + + mock_ssh.execute.return_value = SSHResult(exit_code=1, stdout="", stderr="not found") + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await partition_disk("pve1", "/dev/sdz", confirm_destructive=True) + + assert result["status"] == "error" + assert "not found" in result["message"].lower() + + @pytest.mark.asyncio + async def test_rejects_shell_injection(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import partition_disk + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await partition_disk("pve1", "/dev/sdb;rm -rf /", confirm_destructive=True) + + assert result["status"] == "error" + assert "forbidden" in result["message"].lower() or "invalid" in result["message"].lower() + + @pytest.mark.asyncio + async def test_dry_run(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import partition_disk + + mock_client.is_dry_run = True + mock_client.dry_run_response.return_value = { + "status": "dry_run", + "action": "partition_disk", + } + + # Pass all safety gates + mock_ssh.execute.side_effect = [ + SSHResult(exit_code=0, stdout="exists", stderr=""), # test -b + SSHResult(exit_code=0, stdout="", stderr=""), # lsblk mounts + SSHResult(exit_code=1, stdout="", stderr=""), # findmnt (not mounted) + SSHResult(exit_code=1, stdout="", stderr=""), # pvs (no LVM) + SSHResult(exit_code=1, stdout="", stderr=""), # zpool (no ZFS) + 
SSHResult(exit_code=1, stdout="", stderr=""), # mdstat (no RAID) + ] + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await partition_disk("pve1", "/dev/sdb", confirm_destructive=True) + + assert result["status"] == "dry_run" + + @pytest.mark.asyncio + async def test_success(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import partition_disk + + mock_ssh.execute.side_effect = [ + SSHResult(exit_code=0, stdout="exists", stderr=""), # test -b + SSHResult(exit_code=0, stdout="", stderr=""), # lsblk mounts + SSHResult(exit_code=1, stdout="", stderr=""), # findmnt + SSHResult(exit_code=1, stdout="", stderr=""), # pvs + SSHResult(exit_code=1, stdout="", stderr=""), # zpool + SSHResult(exit_code=1, stdout="", stderr=""), # mdstat + SSHResult(exit_code=0, stdout="", stderr=""), # wipefs + SSHResult(exit_code=0, stdout="", stderr=""), # sgdisk (GPT + partition) + SSHResult(exit_code=0, stdout="", stderr=""), # blockdev --rereadpt + SSHResult(exit_code=0, stdout="", stderr=""), # mkfs.ext4 + SSHResult( # blkid + exit_code=0, + stdout="UUID=12345678-1234-1234-1234-123456789abc\nTYPE=ext4\n", + stderr="", + ), + ] + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await partition_disk( + "pve1", + "/dev/sdb", + filesystem="ext4", + label="data", + confirm_destructive=True, + ) + + assert result["status"] == "success" + assert result["partition_table"] == "gpt" + assert result["partitions_created"][0]["device"] == "/dev/sdb1" + assert result["partitions_created"][0]["uuid"] == "12345678-1234-1234-1234-123456789abc" + + +# --------------------------------------------------------------------------- +# format_disk — safety gates +# --------------------------------------------------------------------------- + + +class TestFormatDisk: + 
@pytest.mark.asyncio + async def test_requires_confirm(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import format_disk + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await format_disk("pve1", "/dev/sdb1", "ext4", confirm_destructive=False) + + assert result["status"] == "confirmation_required" + + @pytest.mark.asyncio + async def test_rejects_mounted_partition(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import format_disk + + mock_ssh.execute.side_effect = [ + SSHResult(exit_code=0, stdout="exists", stderr=""), # test -b + SSHResult(exit_code=0, stdout="/mnt/data", stderr=""), # findmnt + ] + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await format_disk("pve1", "/dev/sdb1", "ext4", confirm_destructive=True) + + assert result["status"] == "error" + assert "mounted" in result["message"].lower() + + @pytest.mark.asyncio + async def test_success(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import format_disk + + mock_ssh.execute.side_effect = [ + SSHResult(exit_code=0, stdout="exists", stderr=""), # test -b + SSHResult(exit_code=1, stdout="", stderr=""), # findmnt (not mounted) + SSHResult(exit_code=0, stdout="", stderr=""), # wipefs + SSHResult(exit_code=0, stdout="", stderr=""), # mkfs + SSHResult( # blkid + exit_code=0, + stdout="UUID=abcd-ef01\nTYPE=ext4\n", + stderr="", + ), + ] + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await format_disk( + "pve1", + "/dev/sdb1", + "ext4", + label="data", + confirm_destructive=True, + ) + + assert result["status"] == "success" + assert result["filesystem"] == "ext4" + assert result["uuid"] == "abcd-ef01" + + @pytest.mark.asyncio + 
async def test_rejects_invalid_filesystem(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import format_disk + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await format_disk("pve1", "/dev/sdb1", "ntfs", confirm_destructive=True) + + assert result["status"] == "error" + + @pytest.mark.asyncio + async def test_rejects_injection_in_options(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import format_disk + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await format_disk( + "pve1", + "/dev/sdb1", + "ext4", + options="; rm -rf /", + confirm_destructive=True, + ) + + assert result["status"] == "error" + + +# --------------------------------------------------------------------------- +# create_mount_point +# --------------------------------------------------------------------------- + + +class TestCreateMountPoint: + @pytest.mark.asyncio + async def test_success(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import create_mount_point + + mock_ssh.execute.side_effect = [ + SSHResult(exit_code=0, stdout="exists", stderr=""), # test -b + SSHResult( # blkid + exit_code=0, + stdout="TYPE=ext4\nUUID=12345678-abcd-1234-abcd-123456789abc\n", + stderr="", + ), + SSHResult(exit_code=1, stdout="", stderr=""), # findmnt (not mounted) + SSHResult(exit_code=0, stdout="", stderr=""), # mkdir + SSHResult(exit_code=0, stdout="", stderr=""), # mount + SSHResult(exit_code=0, stdout="/dev/sdb1", stderr=""), # findmnt verify + SSHResult(exit_code=0, stdout="", stderr=""), # cp fstab backup + SSHResult(exit_code=0, stdout="", stderr=""), # echo >> fstab + SSHResult(exit_code=0, stdout="", stderr=""), # mount -a --fake + ] + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + 
patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await create_mount_point("pve1", "/dev/sdb1", "/mnt/data") + + assert result["status"] == "success" + assert result["mount_path"] == "/mnt/data" + assert result["fstab_entry_added"] is True + + @pytest.mark.asyncio + async def test_rejects_invalid_mount_path(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import create_mount_point + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await create_mount_point("pve1", "/dev/sdb1", "/etc/data") + + assert result["status"] == "error" + + @pytest.mark.asyncio + async def test_rejects_path_traversal(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import create_mount_point + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await create_mount_point("pve1", "/dev/sdb1", "/mnt/../../etc/passwd") + + assert result["status"] == "error" + + @pytest.mark.asyncio + async def test_rejects_already_mounted(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import create_mount_point + + mock_ssh.execute.side_effect = [ + SSHResult(exit_code=0, stdout="exists", stderr=""), # test -b + SSHResult( # blkid + exit_code=0, + stdout="TYPE=ext4\nUUID=aabbccdd-1122-3344-5566-778899aabbcc\n", + stderr="", + ), + SSHResult(exit_code=0, stdout="/mnt/data", stderr=""), # findmnt (already mounted!) 
+ ] + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await create_mount_point("pve1", "/dev/sdb1", "/mnt/data") + + assert result["status"] == "error" + assert "already" in result["message"].lower() + + @pytest.mark.asyncio + async def test_fstab_rollback_on_validation_failure(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import create_mount_point + + mock_ssh.execute.side_effect = [ + SSHResult(exit_code=0, stdout="exists", stderr=""), # test -b + SSHResult( # blkid + exit_code=0, + stdout="TYPE=ext4\nUUID=aabbccdd-1122-3344-5566-778899aabbcc\n", + stderr="", + ), + SSHResult(exit_code=1, stdout="", stderr=""), # findmnt (not mounted) + SSHResult(exit_code=0, stdout="", stderr=""), # mkdir + SSHResult(exit_code=0, stdout="", stderr=""), # mount + SSHResult(exit_code=0, stdout="/dev/sdb1", stderr=""), # findmnt verify + SSHResult(exit_code=0, stdout="", stderr=""), # cp fstab backup + SSHResult(exit_code=0, stdout="", stderr=""), # echo >> fstab + SSHResult(exit_code=1, stdout="", stderr="parse error"), # mount -a --fake FAILS + SSHResult(exit_code=0, stdout="", stderr=""), # cp fstab rollback + ] + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await create_mount_point("pve1", "/dev/sdb1", "/mnt/data") + + assert result["status"] == "success" + # fstab entry should NOT have been added due to rollback + assert result["fstab_entry_added"] is False + + +# --------------------------------------------------------------------------- +# unmount_path +# --------------------------------------------------------------------------- + + +class TestUnmountPath: + @pytest.mark.asyncio + async def test_success(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import unmount_path + + mock_ssh.execute.side_effect = [ + 
SSHResult(exit_code=0, stdout="/dev/sdb1", stderr=""), # findmnt (is mounted) + SSHResult(exit_code=0, stdout="", stderr=""), # umount + ] + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await unmount_path("pve1", "/mnt/data") + + assert result["status"] == "success" + assert result["unmounted"] is True + + @pytest.mark.asyncio + async def test_rejects_not_mounted(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import unmount_path + + mock_ssh.execute.return_value = SSHResult(exit_code=1, stdout="", stderr="") + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await unmount_path("pve1", "/mnt/data") + + assert result["status"] == "error" + assert "not currently mounted" in result["message"].lower() + + @pytest.mark.asyncio + async def test_rejects_critical_path(self, mock_client, mock_ssh): + from proxmox_mcp.tools.disk import unmount_path + + with ( + patch("proxmox_mcp.tools.disk.get_client", return_value=mock_client), + patch("proxmox_mcp.tools.disk.get_ssh", return_value=mock_ssh), + ): + result = await unmount_path("pve1", "/etc/data") + + assert result["status"] == "error" + + +# --------------------------------------------------------------------------- +# add_storage / remove_storage +# --------------------------------------------------------------------------- + + +class TestAddStorage: + @pytest.mark.asyncio + async def test_add_dir_storage_success(self, mock_client): + from proxmox_mcp.tools.storage import add_storage + + mock_client.api_call.side_effect = [ + [], # GET /storage (no existing) + None, # POST /storage + ] + + with patch("proxmox_mcp.tools.storage.get_client", return_value=mock_client): + result = await add_storage( + storage_id="data", + storage_type="dir", + content="images,iso,vztmpl", + 
path="/mnt/data", + nodes="pve1", + ) + + assert result["status"] == "success" + assert result["storage_id"] == "data" + assert "images" in result["content"] + assert "iso" in result["content"] + + @pytest.mark.asyncio + async def test_add_storage_duplicate_id(self, mock_client): + from proxmox_mcp.tools.storage import add_storage + + mock_client.api_call.return_value = [ + {"storage": "data", "type": "dir"}, + ] + + with patch("proxmox_mcp.tools.storage.get_client", return_value=mock_client): + result = await add_storage( + storage_id="data", + storage_type="dir", + content="images", + path="/mnt/data", + ) + + assert result["status"] == "error" + assert "already exists" in result["message"] + + @pytest.mark.asyncio + async def test_add_storage_invalid_type(self, mock_client): + from proxmox_mcp.tools.storage import add_storage + + with patch("proxmox_mcp.tools.storage.get_client", return_value=mock_client): + result = await add_storage( + storage_id="test", + storage_type="invalid", + content="images", + ) + + assert result["status"] == "error" + assert "Invalid storage type" in result["message"] + + @pytest.mark.asyncio + async def test_add_storage_invalid_content(self, mock_client): + from proxmox_mcp.tools.storage import add_storage + + with patch("proxmox_mcp.tools.storage.get_client", return_value=mock_client): + result = await add_storage( + storage_id="test", + storage_type="dir", + content="images,bogus", + path="/mnt/test", + ) + + assert result["status"] == "error" + assert "Invalid content" in result["message"] + + @pytest.mark.asyncio + async def test_add_nfs_requires_server_and_export(self, mock_client): + from proxmox_mcp.tools.storage import add_storage + + mock_client.api_call.return_value = [] # no existing storage + + with patch("proxmox_mcp.tools.storage.get_client", return_value=mock_client): + result = await add_storage( + storage_id="nfs1", + storage_type="nfs", + content="backup", + ) + + assert result["status"] == "error" + assert "server" 
in result["message"].lower() + + @pytest.mark.asyncio + async def test_add_storage_dry_run(self, mock_client): + from proxmox_mcp.tools.storage import add_storage + + mock_client.is_dry_run = True + mock_client.api_call.return_value = [] + mock_client.dry_run_response.return_value = {"status": "dry_run"} + + with patch("proxmox_mcp.tools.storage.get_client", return_value=mock_client): + result = await add_storage( + storage_id="test", + storage_type="dir", + content="images", + path="/mnt/test", + ) + + assert result["status"] == "dry_run" + + +class TestRemoveStorage: + @pytest.mark.asyncio + async def test_requires_confirm(self, mock_client): + from proxmox_mcp.tools.storage import remove_storage + + with patch("proxmox_mcp.tools.storage.get_client", return_value=mock_client): + result = await remove_storage("my-storage", confirm=False) + + assert result["status"] == "confirmation_required" + + @pytest.mark.asyncio + async def test_rejects_default_storage(self, mock_client): + from proxmox_mcp.tools.storage import remove_storage + + with patch("proxmox_mcp.tools.storage.get_client", return_value=mock_client): + result = await remove_storage("local", confirm=True) + + assert result["status"] == "error" + assert "default" in result["message"].lower() + + @pytest.mark.asyncio + async def test_rejects_local_lvm(self, mock_client): + from proxmox_mcp.tools.storage import remove_storage + + with patch("proxmox_mcp.tools.storage.get_client", return_value=mock_client): + result = await remove_storage("local-lvm", confirm=True) + + assert result["status"] == "error" + + @pytest.mark.asyncio + async def test_success(self, mock_client): + from proxmox_mcp.tools.storage import remove_storage + + mock_client.api_call.return_value = None + + with patch("proxmox_mcp.tools.storage.get_client", return_value=mock_client): + result = await remove_storage("my-data", confirm=True) + + assert result["status"] == "success" + assert "my-data" in result["message"] diff --git 
a/tests/test_errors.py b/tests/test_errors.py index 1ee7cbd..fcf40f9 100644 --- a/tests/test_errors.py +++ b/tests/test_errors.py @@ -1,24 +1,30 @@ from proxmox_mcp.utils.errors import ( - ProxmoxMCPError, - ProxmoxConnectionError, AuthenticationError, - VMNotFoundError, ContainerNotFoundError, + InsufficientPermissionsError, + InvalidParameterError, + NodeNotAllowedError, NodeNotFoundError, ProtectedResourceError, - NodeNotAllowedError, + ProxmoxConnectionError, + ProxmoxMCPError, TaskTimeoutError, - InsufficientPermissionsError, - InvalidParameterError, + VMNotFoundError, format_error_response, ) def test_all_exceptions_inherit_from_base(): for exc_class in [ - ProxmoxConnectionError, AuthenticationError, VMNotFoundError, - ContainerNotFoundError, NodeNotFoundError, ProtectedResourceError, - NodeNotAllowedError, TaskTimeoutError, InsufficientPermissionsError, + ProxmoxConnectionError, + AuthenticationError, + VMNotFoundError, + ContainerNotFoundError, + NodeNotFoundError, + ProtectedResourceError, + NodeNotAllowedError, + TaskTimeoutError, + InsufficientPermissionsError, InvalidParameterError, ]: assert issubclass(exc_class, ProxmoxMCPError) diff --git a/tests/test_formatters.py b/tests/test_formatters.py index 097da93..9848026 100644 --- a/tests/test_formatters.py +++ b/tests/test_formatters.py @@ -1,17 +1,25 @@ from proxmox_mcp.utils.formatters import ( - format_vm_summary, - format_container_summary, format_bytes, - format_uptime, + format_container_summary, format_task_result, + format_uptime, + format_vm_summary, ) def test_format_vm_summary(): raw = { - "vmid": 100, "name": "test-vm", "status": "running", "node": "pve1", - "maxcpu": 4, "maxmem": 4294967296, "mem": 2147483648, - "maxdisk": 34359738368, "uptime": 90061, "cpu": 0.156, "tags": "web;prod", + "vmid": 100, + "name": "test-vm", + "status": "running", + "node": "pve1", + "maxcpu": 4, + "maxmem": 4294967296, + "mem": 2147483648, + "maxdisk": 34359738368, + "uptime": 90061, + "cpu": 0.156, + "tags": 
"web;prod", } result = format_vm_summary(raw) assert result["vmid"] == 100 @@ -27,9 +35,16 @@ def test_format_vm_summary(): def test_format_container_summary(): raw = { - "vmid": 200, "name": "ct-test", "status": "stopped", "node": "pve2", - "maxcpu": 2, "maxmem": 1073741824, "mem": 0, "maxdisk": 8589934592, - "uptime": 0, "cpu": 0, + "vmid": 200, + "name": "ct-test", + "status": "stopped", + "node": "pve2", + "maxcpu": 2, + "maxmem": 1073741824, + "mem": 0, + "maxdisk": 8589934592, + "uptime": 0, + "cpu": 0, } result = format_container_summary(raw) assert result["type"] == "lxc" diff --git a/tests/test_network_tools.py b/tests/test_network_tools.py new file mode 100644 index 0000000..dd6a930 --- /dev/null +++ b/tests/test_network_tools.py @@ -0,0 +1,249 @@ +"""Tests for network and firewall tools.""" + +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + + +@pytest.fixture +def mock_client(): + with patch("proxmox_mcp.tools.network.get_client") as mock_get: + client = MagicMock() + client.config.PROXMOX_ALLOWED_NODES = [] + client.config.PROXMOX_PROTECTED_VMIDS = [] + client.is_dry_run = False + client.resolve_node_for_vmid = AsyncMock(return_value="pve1") + client.resolve_node = AsyncMock(return_value="pve1") + mock_get.return_value = client + yield client + + +# --- get_node_firewall_rules --- + + +@pytest.mark.asyncio +async def test_get_node_firewall_rules(mock_client): + from proxmox_mcp.tools.network import get_node_firewall_rules + + rules = [ + {"pos": 0, "type": "in", "action": "ACCEPT", "proto": "tcp", "dport": "22"}, + {"pos": 1, "type": "in", "action": "ACCEPT", "proto": "tcp", "dport": "8006"}, + ] + mock_client.api_call = AsyncMock(return_value=rules) + result = await get_node_firewall_rules(node="pve1") + + assert result["status"] == "success" + assert result["node"] == "pve1" + assert result["total"] == 2 + assert result["rules"] == rules + + +@pytest.mark.asyncio +async def test_get_node_firewall_rules_empty(mock_client): + 
from proxmox_mcp.tools.network import get_node_firewall_rules + + mock_client.api_call = AsyncMock(return_value=[]) + result = await get_node_firewall_rules(node="pve1") + + assert result["status"] == "success" + assert result["total"] == 0 + assert result["rules"] == [] + + +@pytest.mark.asyncio +async def test_get_node_firewall_rules_error(mock_client): + from proxmox_mcp.tools.network import get_node_firewall_rules + + mock_client.api_call = AsyncMock(side_effect=Exception("API error")) + result = await get_node_firewall_rules(node="pve1") + + assert result["status"] == "error" + + +# --- get_vm_firewall_rules --- + + +@pytest.mark.asyncio +async def test_get_vm_firewall_rules(mock_client): + from proxmox_mcp.tools.network import get_vm_firewall_rules + + rules = [{"pos": 0, "type": "in", "action": "DROP", "proto": "tcp", "dport": "3389"}] + mock_client.api_call = AsyncMock(return_value=rules) + result = await get_vm_firewall_rules(vmid=100) + + assert result["status"] == "success" + assert result["vmid"] == 100 + assert result["total"] == 1 + + +@pytest.mark.asyncio +async def test_get_vm_firewall_rules_fallback_to_lxc(mock_client): + from proxmox_mcp.tools.network import get_vm_firewall_rules + + lxc_rules = [{"pos": 0, "type": "in", "action": "ACCEPT", "proto": "tcp", "dport": "80"}] + # First call (QEMU) fails, second (LXC) succeeds + mock_client.api_call = AsyncMock(side_effect=[Exception("not qemu"), lxc_rules]) + result = await get_vm_firewall_rules(vmid=200) + + assert result["status"] == "success" + assert result["vmid"] == 200 + assert result["total"] == 1 + assert result["rules"] == lxc_rules + + +@pytest.mark.asyncio +async def test_get_vm_firewall_rules_error(mock_client): + from proxmox_mcp.tools.network import get_vm_firewall_rules + + mock_client.api_call = AsyncMock(side_effect=Exception("Both APIs failed")) + result = await get_vm_firewall_rules(vmid=100) + + assert result["status"] == "error" + + +# --- get_vm_interfaces --- + + 
+@pytest.mark.asyncio +async def test_get_vm_interfaces(mock_client): + from proxmox_mcp.tools.network import get_vm_interfaces + + ifaces = {"result": [{"name": "eth0", "ip-addresses": [{"ip-address": "10.0.0.5"}]}]} + mock_client.api_call = AsyncMock(return_value=ifaces) + result = await get_vm_interfaces(vmid=100) + + assert result["status"] == "success" + assert result["vmid"] == 100 + assert len(result["interfaces"]) == 1 + + +@pytest.mark.asyncio +async def test_get_vm_interfaces_fallback_to_lxc(mock_client): + from proxmox_mcp.tools.network import get_vm_interfaces + + lxc_ifaces = [{"name": "eth0", "hwaddr": "AA:BB:CC:DD:EE:FF"}] + mock_client.api_call = AsyncMock(side_effect=[Exception("no agent"), lxc_ifaces]) + result = await get_vm_interfaces(vmid=200) + + assert result["status"] == "success" + assert result["interfaces"] == lxc_ifaces + + +@pytest.mark.asyncio +async def test_get_vm_interfaces_error(mock_client): + from proxmox_mcp.tools.network import get_vm_interfaces + + mock_client.api_call = AsyncMock(side_effect=Exception("VM not running")) + result = await get_vm_interfaces(vmid=100) + + assert result["status"] == "error" + assert "suggestion" in result + + +# --- create_node_firewall_rule --- + + +@pytest.mark.asyncio +async def test_create_node_firewall_rule(mock_client): + from proxmox_mcp.tools.network import create_node_firewall_rule + + mock_client.api_call = AsyncMock(return_value=None) + result = await create_node_firewall_rule( + node="pve1", action="ACCEPT", type="in", proto="tcp", dport="22" + ) + assert result["status"] == "success" + assert result["rule"]["action"] == "ACCEPT" + assert result["rule"]["dport"] == "22" + + +@pytest.mark.asyncio +async def test_create_node_firewall_rule_invalid_action(mock_client): + from proxmox_mcp.tools.network import create_node_firewall_rule + + result = await create_node_firewall_rule( + node="pve1", action="ALLOW", type="in" + ) + assert result["status"] == "error" + assert "ALLOW" in 
result["message"] + + +@pytest.mark.asyncio +async def test_create_node_firewall_rule_dry_run(mock_client): + from proxmox_mcp.tools.network import create_node_firewall_rule + + mock_client.is_dry_run = True + mock_client.dry_run_response.return_value = {"status": "dry_run"} + result = await create_node_firewall_rule( + node="pve1", action="ACCEPT", type="in" + ) + assert result["status"] == "dry_run" + + +# --- delete_node_firewall_rule --- + + +@pytest.mark.asyncio +async def test_delete_node_firewall_rule_requires_confirm(mock_client): + from proxmox_mcp.tools.network import delete_node_firewall_rule + + result = await delete_node_firewall_rule(node="pve1", pos=0) + assert result["status"] == "confirmation_required" + + +@pytest.mark.asyncio +async def test_delete_node_firewall_rule_confirmed(mock_client): + from proxmox_mcp.tools.network import delete_node_firewall_rule + + mock_client.api_call = AsyncMock(return_value=None) + result = await delete_node_firewall_rule(node="pve1", pos=0, confirm=True) + assert result["status"] == "success" + assert result["deleted_pos"] == 0 + + +# --- create_vm_firewall_rule --- + + +@pytest.mark.asyncio +async def test_create_vm_firewall_rule(mock_client): + from proxmox_mcp.tools.network import create_vm_firewall_rule + + mock_client.api_call = AsyncMock(return_value=None) + result = await create_vm_firewall_rule( + vmid=100, action="DROP", type="in", proto="tcp", dport="3389" + ) + assert result["status"] == "success" + assert result["vmid"] == 100 + assert result["rule"]["action"] == "DROP" + + +@pytest.mark.asyncio +async def test_create_vm_firewall_rule_protected(mock_client): + from proxmox_mcp.tools.network import create_vm_firewall_rule + from proxmox_mcp.utils.errors import ProtectedResourceError + + mock_client.check_protected.side_effect = ProtectedResourceError("protected") + result = await create_vm_firewall_rule( + vmid=100, action="ACCEPT", type="in" + ) + assert result["status"] == "error" + + +# --- 
delete_vm_firewall_rule --- + + +@pytest.mark.asyncio +async def test_delete_vm_firewall_rule_requires_confirm(mock_client): + from proxmox_mcp.tools.network import delete_vm_firewall_rule + + result = await delete_vm_firewall_rule(vmid=100, pos=0) + assert result["status"] == "confirmation_required" + + +@pytest.mark.asyncio +async def test_delete_vm_firewall_rule_confirmed(mock_client): + from proxmox_mcp.tools.network import delete_vm_firewall_rule + + mock_client.api_call = AsyncMock(return_value=None) + result = await delete_vm_firewall_rule(vmid=100, pos=0, confirm=True) + assert result["status"] == "success" + assert result["deleted_pos"] == 0 diff --git a/tests/test_node_tools.py b/tests/test_node_tools.py index cffee08..f2705da 100644 --- a/tests/test_node_tools.py +++ b/tests/test_node_tools.py @@ -1,7 +1,9 @@ """Tests for node tools.""" +from unittest.mock import AsyncMock, MagicMock, patch + import pytest -from unittest.mock import MagicMock, AsyncMock, patch + from proxmox_mcp.utils.errors import NodeNotAllowedError @@ -16,6 +18,7 @@ def mock_client(): # --- list_nodes --- + @pytest.mark.asyncio async def test_list_nodes(mock_client): from proxmox_mcp.tools.node import list_nodes @@ -85,6 +88,7 @@ async def test_list_nodes_error(mock_client): # --- get_node_status --- + @pytest.mark.asyncio async def test_get_node_status(mock_client): from proxmox_mcp.tools.node import get_node_status @@ -134,6 +138,7 @@ async def test_get_node_status_invalid_name(): # --- get_node_services --- + @pytest.mark.asyncio async def test_get_node_services(mock_client): from proxmox_mcp.tools.node import get_node_services @@ -153,6 +158,7 @@ async def test_get_node_services(mock_client): # --- get_node_network --- + @pytest.mark.asyncio async def test_get_node_network(mock_client): from proxmox_mcp.tools.node import get_node_network @@ -172,6 +178,7 @@ async def test_get_node_network(mock_client): # --- get_node_storage --- + @pytest.mark.asyncio async def 
test_get_node_storage(mock_client): from proxmox_mcp.tools.node import get_node_storage @@ -191,6 +198,7 @@ async def test_get_node_storage(mock_client): # --- get_node_syslog --- + @pytest.mark.asyncio async def test_get_node_syslog(mock_client): from proxmox_mcp.tools.node import get_node_syslog @@ -221,3 +229,82 @@ async def test_get_node_syslog_with_since(mock_client): assert result["status"] == "success" assert result["count"] == 1 + + +# --- reboot_node --- + + +@pytest.mark.asyncio +async def test_reboot_node_requires_confirm(mock_client): + from proxmox_mcp.tools.node import reboot_node + + with patch("proxmox_mcp.tools.node.get_client", return_value=mock_client): + result = await reboot_node("pve1") + + assert result["status"] == "confirmation_required" + assert "REBOOT" in result["warning"] + + +@pytest.mark.asyncio +async def test_reboot_node_confirmed(mock_client): + from proxmox_mcp.tools.node import reboot_node + + mock_client.api_call.return_value = "UPID:pve1:00001:reboot" + mock_client.is_dry_run = False + + with patch("proxmox_mcp.tools.node.get_client", return_value=mock_client): + result = await reboot_node("pve1", confirm=True) + + assert result["status"] == "submitted" + + +@pytest.mark.asyncio +async def test_reboot_node_dry_run(mock_client): + from proxmox_mcp.tools.node import reboot_node + + mock_client.is_dry_run = True + mock_client.dry_run_response.return_value = {"status": "dry_run"} + + with patch("proxmox_mcp.tools.node.get_client", return_value=mock_client): + result = await reboot_node("pve1", confirm=True) + + assert result["status"] == "dry_run" + + +# --- shutdown_node --- + + +@pytest.mark.asyncio +async def test_shutdown_node_requires_confirm(mock_client): + from proxmox_mcp.tools.node import shutdown_node + + with patch("proxmox_mcp.tools.node.get_client", return_value=mock_client): + result = await shutdown_node("pve1") + + assert result["status"] == "confirmation_required" + assert "SHUT DOWN" in result["warning"] + + 
+@pytest.mark.asyncio +async def test_shutdown_node_confirmed(mock_client): + from proxmox_mcp.tools.node import shutdown_node + + mock_client.api_call.return_value = "UPID:pve1:00002:shutdown" + mock_client.is_dry_run = False + + with patch("proxmox_mcp.tools.node.get_client", return_value=mock_client): + result = await shutdown_node("pve1", confirm=True) + + assert result["status"] == "submitted" + + +@pytest.mark.asyncio +async def test_shutdown_node_not_allowed(mock_client): + from proxmox_mcp.tools.node import shutdown_node + + mock_client.validate_node.side_effect = NodeNotAllowedError("not allowed") + + with patch("proxmox_mcp.tools.node.get_client", return_value=mock_client): + result = await shutdown_node("pve3", confirm=True) + + assert result["status"] == "error" diff --git a/tests/test_prompts.py b/tests/test_prompts.py new file mode 100644 index 0000000..ef5c429 --- /dev/null +++ b/tests/test_prompts.py @@ -0,0 +1,62 @@ +"""Tests for MCP prompt templates.""" + +from proxmox_mcp.prompts.prompts import ( + capacity_planning, + disaster_recovery_check, + infrastructure_overview, + security_audit, + troubleshoot_vm, + vm_deployment, +) + + +def test_infrastructure_overview(): + result = infrastructure_overview() + assert isinstance(result, str) + assert "cluster" in result.lower() + assert "node" in result.lower() + assert "storage" in result.lower() + + +def test_capacity_planning(): + result = capacity_planning() + assert isinstance(result, str) + assert "capacity" in result.lower() + assert "cpu" in result.lower() + assert "ram" in result.lower() + + +def test_vm_deployment_defaults(): + result = vm_deployment(name="web-server", purpose="web hosting") + assert isinstance(result, str) + assert "web-server" in result + assert "web hosting" in result + assert "linux" in result + + +def test_vm_deployment_custom_os(): + result = vm_deployment(name="win-dc", purpose="domain controller", os="windows") + assert isinstance(result, str) + assert "win-dc" in 
result + assert "windows" in result + + +def test_disaster_recovery_check(): + result = disaster_recovery_check() + assert isinstance(result, str) + assert "backup" in result.lower() + assert "snapshot" in result.lower() + + +def test_security_audit(): + result = security_audit() + assert isinstance(result, str) + assert "firewall" in result.lower() + assert "security" in result.lower() + + +def test_troubleshoot_vm(): + result = troubleshoot_vm(vmid=100) + assert isinstance(result, str) + assert "100" in result + assert "status" in result.lower() diff --git a/tests/test_resources.py b/tests/test_resources.py new file mode 100644 index 0000000..09b2727 --- /dev/null +++ b/tests/test_resources.py @@ -0,0 +1,300 @@ +"""Tests for MCP resource definitions.""" + +import json +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + + +@pytest.fixture +def mock_client(): + with patch("proxmox_mcp.resources.resources.get_client") as mock_get: + client = MagicMock() + client.resolve_node_for_vmid = AsyncMock(return_value="pve1") + mock_get.return_value = client + yield client + + +# --- cluster_status --- + + +@pytest.mark.asyncio +async def test_cluster_status(mock_client): + from proxmox_mcp.resources.resources import cluster_status + + mock_client.api_call = AsyncMock(return_value=[{"type": "cluster", "name": "pve"}]) + result = await cluster_status() + + data = json.loads(result) + assert isinstance(data, list) + assert data[0]["type"] == "cluster" + + +@pytest.mark.asyncio +async def test_cluster_status_error(mock_client): + from proxmox_mcp.resources.resources import cluster_status + + mock_client.api_call = AsyncMock(side_effect=Exception("connection refused")) + result = await cluster_status() + + data = json.loads(result) + assert "error" in data + + +# --- cluster_resources --- + + +@pytest.mark.asyncio +async def test_cluster_resources(mock_client): + from proxmox_mcp.resources.resources import cluster_resources + + mock_client.api_call = 
AsyncMock( + return_value=[{"type": "qemu", "vmid": 100, "name": "vm1", "node": "pve1"}] + ) + result = await cluster_resources() + + data = json.loads(result) + assert len(data) == 1 + assert data[0]["vmid"] == 100 + + +# --- nodes_list --- + + +@pytest.mark.asyncio +async def test_nodes_list(mock_client): + from proxmox_mcp.resources.resources import nodes_list + + mock_client.api_call = AsyncMock( + return_value=[{"node": "pve1", "status": "online", "cpu": 0.1}] + ) + result = await nodes_list() + + data = json.loads(result) + assert data[0]["node"] == "pve1" + + +# --- node_status --- + + +@pytest.mark.asyncio +async def test_node_status(mock_client): + from proxmox_mcp.resources.resources import node_status + + mock_client.api_call = AsyncMock( + return_value={"uptime": 86400, "memory": {"used": 1024, "total": 8192}} + ) + result = await node_status(node="pve1") + + data = json.loads(result) + assert data["uptime"] == 86400 + + +@pytest.mark.asyncio +async def test_node_status_error(mock_client): + from proxmox_mcp.resources.resources import node_status + + mock_client.api_call = AsyncMock(side_effect=Exception("node offline")) + result = await node_status(node="pve1") + + data = json.loads(result) + assert "error" in data + + +# --- all_vms --- + + +@pytest.mark.asyncio +async def test_all_vms(mock_client): + from proxmox_mcp.resources.resources import all_vms + + mock_client.api_call = AsyncMock( + return_value=[ + { + "type": "qemu", + "vmid": 100, + "name": "vm1", + "status": "running", + "node": "pve1", + "maxcpu": 2, + "maxmem": 2147483648, + "mem": 1073741824, + "maxdisk": 10737418240, + "uptime": 3600, + "cpu": 0.05, + }, + { + "type": "lxc", + "vmid": 200, + "name": "ct1", + "status": "running", + "node": "pve1", + "maxcpu": 1, + "maxmem": 536870912, + "mem": 268435456, + "maxdisk": 4294967296, + "uptime": 7200, + "cpu": 0.02, + }, + ] + ) + result = await all_vms() + + data = json.loads(result) + # Should only include QEMU VMs, not LXC + assert 
len(data) == 1 + assert data[0]["vmid"] == 100 + assert data[0]["type"] == "qemu" + + +# --- all_containers --- + + +@pytest.mark.asyncio +async def test_all_containers(mock_client): + from proxmox_mcp.resources.resources import all_containers + + mock_client.api_call = AsyncMock( + return_value=[ + { + "type": "qemu", + "vmid": 100, + "name": "vm1", + "status": "running", + "node": "pve1", + "maxcpu": 2, + "maxmem": 2147483648, + "mem": 1073741824, + "maxdisk": 10737418240, + "uptime": 3600, + "cpu": 0.05, + }, + { + "type": "lxc", + "vmid": 200, + "name": "ct1", + "status": "running", + "node": "pve1", + "maxcpu": 1, + "maxmem": 536870912, + "mem": 268435456, + "maxdisk": 4294967296, + "uptime": 7200, + "cpu": 0.02, + }, + ] + ) + result = await all_containers() + + data = json.loads(result) + # Should only include LXC containers, not QEMU + assert len(data) == 1 + assert data[0]["vmid"] == 200 + assert data[0]["type"] == "lxc" + + +# --- vm_detail --- + + +@pytest.mark.asyncio +async def test_vm_detail(mock_client): + from proxmox_mcp.resources.resources import vm_detail + + mock_client.api_call = AsyncMock( + side_effect=[ + {"status": "running", "vmid": 100}, # status.current.get + {"name": "test-vm", "memory": 2048}, # config.get + ] + ) + result = await vm_detail(vmid=100) + + data = json.loads(result) + assert data["status"]["vmid"] == 100 + assert data["config"]["name"] == "test-vm" + assert data["node"] == "pve1" + + +@pytest.mark.asyncio +async def test_vm_detail_not_found(mock_client): + from proxmox_mcp.resources.resources import vm_detail + + mock_client.resolve_node_for_vmid = AsyncMock(side_effect=Exception("VMID 999 not found")) + result = await vm_detail(vmid=999) + + data = json.loads(result) + assert "error" in data + + +# --- container_detail --- + + +@pytest.mark.asyncio +async def test_container_detail(mock_client): + from proxmox_mcp.resources.resources import container_detail + + mock_client.api_call = AsyncMock( + side_effect=[ + 
{"status": "running", "vmid": 200}, # status.current.get + {"hostname": "ct1", "memory": 512}, # config.get + ] + ) + result = await container_detail(vmid=200) + + data = json.loads(result) + assert data["status"]["vmid"] == 200 + assert data["config"]["hostname"] == "ct1" + + +# --- storage_overview --- + + +@pytest.mark.asyncio +async def test_storage_overview(mock_client): + from proxmox_mcp.resources.resources import storage_overview + + mock_client.api_call = AsyncMock( + return_value=[ + {"storage": "local", "type": "dir", "content": "images,iso"}, + {"storage": "local-lvm", "type": "lvmthin", "content": "images,rootdir"}, + ] + ) + result = await storage_overview() + + data = json.loads(result) + assert len(data) == 2 + assert data[0]["storage"] == "local" + + +# --- recent_tasks --- + + +@pytest.mark.asyncio +async def test_recent_tasks(mock_client): + from proxmox_mcp.resources.resources import recent_tasks + + mock_client.api_call = AsyncMock( + side_effect=[ + [{"node": "pve1"}], # nodes.get + [ # tasks.get for pve1 + {"upid": "UPID:pve1:001", "status": "ok", "starttime": 1000}, + {"upid": "UPID:pve1:002", "status": "running", "starttime": 2000}, + ], + ] + ) + result = await recent_tasks() + + data = json.loads(result) + assert len(data) == 2 + # Should be sorted by starttime descending + assert data[0]["starttime"] == 2000 + + +@pytest.mark.asyncio +async def test_recent_tasks_error(mock_client): + from proxmox_mcp.resources.resources import recent_tasks + + mock_client.api_call = AsyncMock(side_effect=Exception("cluster error")) + result = await recent_tasks() + + data = json.loads(result) + assert "error" in data diff --git a/tests/test_sanitizers.py b/tests/test_sanitizers.py new file mode 100644 index 0000000..0baf9f7 --- /dev/null +++ b/tests/test_sanitizers.py @@ -0,0 +1,277 @@ +"""Tests for input sanitization and validation functions.""" + +import pytest + +from proxmox_mcp.utils.errors import InvalidParameterError +from 
proxmox_mcp.utils.sanitizers import ( + check_shell_injection, + validate_device_path, + validate_filesystem, + validate_label, + validate_mkfs_options, + validate_mount_options, + validate_mount_path, + validate_partition_table, + validate_snapname, + validate_storage_id, + validate_uuid, +) + +# --- check_shell_injection --- + + +class TestShellInjection: + def test_clean_string_passes(self): + check_shell_injection("hello-world_123", "test") + + @pytest.mark.parametrize( + "bad_input", + [ + "hello;world", + "test|pipe", + "test&bg", + "$(whoami)", + "test`cmd`", + "test\\escape", + "test'quote", + 'test"double', + "test(paren)", + "test{brace}", + "test<redirect>", + "test!bang", + "test~tilde", + "test#hash", + "test\nnewline", + "test\rcarriage", + ], + ) + def test_rejects_shell_metacharacters(self, bad_input): + with pytest.raises(InvalidParameterError, match="forbidden characters"): + check_shell_injection(bad_input, "test_param") + + +# --- validate_device_path --- + + +class TestDevicePath: + def test_valid_whole_disk(self): + validate_device_path("/dev/sdb") + validate_device_path("/dev/nvme0n1") + validate_device_path("/dev/vda") + + def test_rejects_partition_when_not_allowed(self): + with pytest.raises(InvalidParameterError, match="whole disk"): + validate_device_path("/dev/sdb1") + + def test_allows_partition_when_flag_set(self): + validate_device_path("/dev/sdb1", allow_partition=True) + validate_device_path("/dev/nvme0n1p1", allow_partition=True) + + def test_rejects_mapper_paths(self): + with pytest.raises(InvalidParameterError): + validate_device_path("/dev/mapper/vg-lv") + + def test_rejects_relative_paths(self): + with pytest.raises(InvalidParameterError): + validate_device_path("sdb") + + def test_rejects_injection_in_device(self): + with pytest.raises(InvalidParameterError): + validate_device_path("/dev/sdb;rm -rf /") + + +# --- validate_mount_path --- + + +class TestMountPath: + def test_valid_paths(self): + validate_mount_path("/mnt/data") + 
validate_mount_path("/srv/storage") + validate_mount_path("/media/usb") + validate_mount_path("/mnt/my-data/subdir") + + def test_rejects_root(self): + with pytest.raises(InvalidParameterError): + validate_mount_path("/") + + def test_rejects_etc(self): + with pytest.raises(InvalidParameterError): + validate_mount_path("/etc/something") + + def test_rejects_var(self): + with pytest.raises(InvalidParameterError): + validate_mount_path("/var/lib/data") + + def test_rejects_path_traversal(self): + with pytest.raises(InvalidParameterError, match="path traversal"): + validate_mount_path("/mnt/../../etc/passwd") + + def test_rejects_home(self): + with pytest.raises(InvalidParameterError): + validate_mount_path("/home/user/data") + + def test_rejects_injection(self): + with pytest.raises(InvalidParameterError): + validate_mount_path("/mnt/data;rm -rf /") + + +# --- validate_storage_id --- + + +class TestStorageId: + def test_valid_ids(self): + validate_storage_id("local-data") + validate_storage_id("ssd_vms") + validate_storage_id("backup1") + validate_storage_id("a") + + def test_rejects_starting_with_number(self): + with pytest.raises(InvalidParameterError): + validate_storage_id("1storage") + + def test_rejects_special_chars(self): + with pytest.raises(InvalidParameterError): + validate_storage_id("my storage") + + def test_rejects_injection(self): + with pytest.raises(InvalidParameterError): + validate_storage_id("test;drop") + + +# --- validate_label --- + + +class TestLabel: + def test_valid_labels(self): + validate_label("data") + validate_label("vm-storage") + validate_label("BOOT_EFI") + + def test_rejects_too_long(self): + with pytest.raises(InvalidParameterError): + validate_label("a" * 17) + + def test_rejects_special_chars(self): + with pytest.raises(InvalidParameterError): + validate_label("my label") + + +# --- validate_mount_options --- + + +class TestMountOptions: + def test_valid_options(self): + validate_mount_options("defaults") + 
validate_mount_options("defaults,noatime") + validate_mount_options("rw,nosuid,nodev") + validate_mount_options("commit=30") + + def test_rejects_unknown_option(self): + with pytest.raises(InvalidParameterError, match="not allowed"): + validate_mount_options("defaults,exec_shell") + + def test_rejects_injection(self): + with pytest.raises(InvalidParameterError): + validate_mount_options("defaults;rm -rf /") + + +# --- validate_mkfs_options --- + + +class TestMkfsOptions: + def test_valid_options(self): + validate_mkfs_options("-m 1") + validate_mkfs_options("-L mydata") + + def test_rejects_unknown_flag(self): + with pytest.raises(InvalidParameterError): + validate_mkfs_options("-Z evil") + + def test_rejects_injection(self): + with pytest.raises(InvalidParameterError): + validate_mkfs_options("-m 1; rm -rf /") + + +# --- validate_filesystem --- + + +class TestFilesystem: + def test_valid_filesystems(self): + validate_filesystem("ext4") + validate_filesystem("xfs") + validate_filesystem("vfat") + + def test_rejects_invalid(self): + with pytest.raises(InvalidParameterError): + validate_filesystem("ntfs") + + +# --- validate_partition_table --- + + +class TestPartitionTable: + def test_valid_types(self): + validate_partition_table("gpt") + validate_partition_table("msdos") + + def test_rejects_invalid(self): + with pytest.raises(InvalidParameterError): + validate_partition_table("mbr") + + +# --- validate_snapname --- + + +class TestSnapname: + def test_valid_names(self): + validate_snapname("snap1") + validate_snapname("my-snap.v2") + validate_snapname("A_long_name") + validate_snapname("before-upgrade") + + def test_rejects_starting_with_digit(self): + with pytest.raises(InvalidParameterError): + validate_snapname("1snap") + + def test_rejects_special_chars(self): + with pytest.raises(InvalidParameterError): + validate_snapname("snap name") + + def test_rejects_shell_injection(self): + with pytest.raises(InvalidParameterError): + validate_snapname("snap;rm -rf /") 
+ + def test_rejects_empty(self): + with pytest.raises(InvalidParameterError): + validate_snapname("") + + +# --- validate_uuid --- + + +class TestUUID: + def test_valid_uuid(self): + assert validate_uuid("550e8400-e29b-41d4-a716-446655440000") == ( + "550e8400-e29b-41d4-a716-446655440000" + ) + + def test_valid_uuid_uppercase(self): + assert validate_uuid("550E8400-E29B-41D4-A716-446655440000") == ( + "550E8400-E29B-41D4-A716-446655440000" + ) + + def test_rejects_empty(self): + with pytest.raises(InvalidParameterError): + validate_uuid("") + + def test_rejects_shell_injection(self): + with pytest.raises(InvalidParameterError): + validate_uuid("'; rm -rf /; echo '") + + def test_rejects_wrong_format(self): + with pytest.raises(InvalidParameterError): + validate_uuid("not-a-uuid") + + def test_rejects_too_short(self): + with pytest.raises(InvalidParameterError): + validate_uuid("550e8400-e29b-41d4-a716") diff --git a/tests/test_ssh.py b/tests/test_ssh.py new file mode 100644 index 0000000..07c93e4 --- /dev/null +++ b/tests/test_ssh.py @@ -0,0 +1,202 @@ +"""Tests for SSH execution module.""" + +from unittest.mock import MagicMock, patch + +import paramiko +import pytest + +from proxmox_mcp.ssh import SSHExecutor, SSHResult +from proxmox_mcp.utils.errors import SSHExecutionError + + +@pytest.fixture +def mock_config(): + config = MagicMock() + config.PROXMOX_HOST = "192.168.1.100" + config.PROXMOX_SSH_PORT = 22 + config.PROXMOX_SSH_USER = "root" + config.PROXMOX_SSH_KEY_PATH = None + config.PROXMOX_SSH_PASSWORD = "testpass" + config.PROXMOX_PASSWORD = None + config.PROXMOX_SSH_HOST_KEY_CHECKING = False + config.PROXMOX_SSH_KNOWN_HOSTS = "" + return config + + +# --- SSHResult --- + + +class TestSSHResult: + def test_success_property(self): + result = SSHResult(exit_code=0, stdout="ok", stderr="") + assert result.success is True + + def test_failure_property(self): + result = SSHResult(exit_code=1, stdout="", stderr="error") + assert result.success is False + + +# --- 
SSHExecutor --- + + +class TestSSHExecutor: + def test_init(self, mock_config): + executor = SSHExecutor(mock_config) + assert executor.config is mock_config + + @patch("proxmox_mcp.ssh.paramiko.SSHClient") + def test_execute_sync_success(self, mock_ssh_class, mock_config): + # Setup mock SSH client + mock_client = MagicMock() + mock_ssh_class.return_value = mock_client + + mock_stdout = MagicMock() + mock_stdout.read.return_value = b"output data" + mock_stdout.channel.recv_exit_status.return_value = 0 + + mock_stderr = MagicMock() + mock_stderr.read.return_value = b"" + + mock_client.exec_command.return_value = (MagicMock(), mock_stdout, mock_stderr) + + executor = SSHExecutor(mock_config) + result = executor._execute_sync("192.168.1.100", "echo hello", timeout=30) + + assert result.exit_code == 0 + assert result.stdout == "output data" + assert result.stderr == "" + mock_client.connect.assert_called_once() + mock_client.close.assert_called_once() + + @patch("proxmox_mcp.ssh.paramiko.SSHClient") + def test_execute_sync_failure(self, mock_ssh_class, mock_config): + mock_client = MagicMock() + mock_ssh_class.return_value = mock_client + + mock_stdout = MagicMock() + mock_stdout.read.return_value = b"" + mock_stdout.channel.recv_exit_status.return_value = 1 + + mock_stderr = MagicMock() + mock_stderr.read.return_value = b"command not found" + + mock_client.exec_command.return_value = (MagicMock(), mock_stdout, mock_stderr) + + executor = SSHExecutor(mock_config) + result = executor._execute_sync("192.168.1.100", "bad_command", timeout=30) + + assert result.exit_code == 1 + assert result.stderr == "command not found" + + @patch("proxmox_mcp.ssh.paramiko.SSHClient") + def test_execute_sync_connection_error(self, mock_ssh_class, mock_config): + mock_client = MagicMock() + mock_ssh_class.return_value = mock_client + mock_client.connect.side_effect = Exception("Connection refused") + + executor = SSHExecutor(mock_config) + with pytest.raises(SSHExecutionError, match="SSH 
connection"): + executor._execute_sync("192.168.1.100", "echo hello") + + @patch("proxmox_mcp.ssh.paramiko.SSHClient") + def test_execute_sync_with_key_path(self, mock_ssh_class, mock_config): + mock_config.PROXMOX_SSH_KEY_PATH = "/tmp/test_key" + mock_config.PROXMOX_SSH_PASSWORD = None + + mock_client = MagicMock() + mock_ssh_class.return_value = mock_client + + # Key file doesn't exist + executor = SSHExecutor(mock_config) + with pytest.raises(SSHExecutionError, match="SSH key not found"): + executor._execute_sync("192.168.1.100", "echo hello") + + @pytest.mark.asyncio + @patch("proxmox_mcp.ssh.paramiko.SSHClient") + async def test_execute_async(self, mock_ssh_class, mock_config): + mock_client = MagicMock() + mock_ssh_class.return_value = mock_client + + mock_stdout = MagicMock() + mock_stdout.read.return_value = b"async output" + mock_stdout.channel.recv_exit_status.return_value = 0 + + mock_stderr = MagicMock() + mock_stderr.read.return_value = b"" + + mock_client.exec_command.return_value = (MagicMock(), mock_stdout, mock_stderr) + + executor = SSHExecutor(mock_config) + result = await executor.execute("pve1", "echo hello", timeout=30) + + assert result.exit_code == 0 + assert result.stdout == "async output" + + def test_timeout_capped_at_max(self, mock_config): + SSHExecutor(mock_config) # Verifies construction succeeds + + +class TestSSHHostKeyVerification: + @patch("proxmox_mcp.ssh.paramiko.SSHClient") + def test_reject_policy_when_checking_enabled(self, mock_ssh_class, mock_config): + mock_config.PROXMOX_SSH_HOST_KEY_CHECKING = True + mock_config.PROXMOX_SSH_KNOWN_HOSTS = "" + mock_client = MagicMock() + mock_ssh_class.return_value = mock_client + + # Simulate reject policy raising on unknown host + mock_client.connect.side_effect = paramiko.SSHException("Unknown host key") + + executor = SSHExecutor(mock_config) + with pytest.raises(SSHExecutionError, match="SSH connection"): + executor._execute_sync("192.168.1.100", "echo hello") + + 
mock_client.set_missing_host_key_policy.assert_called_once() + policy_arg = mock_client.set_missing_host_key_policy.call_args[0][0] + assert isinstance(policy_arg, paramiko.RejectPolicy) + + @patch("proxmox_mcp.ssh.os.path.exists", return_value=True) + @patch("proxmox_mcp.ssh.paramiko.SSHClient") + def test_loads_custom_known_hosts_file(self, mock_ssh_class, mock_exists, mock_config): + mock_config.PROXMOX_SSH_HOST_KEY_CHECKING = True + mock_config.PROXMOX_SSH_KNOWN_HOSTS = "/custom/known_hosts" + mock_client = MagicMock() + mock_ssh_class.return_value = mock_client + mock_client.connect.side_effect = Exception("test") + + executor = SSHExecutor(mock_config) + with pytest.raises(SSHExecutionError): + executor._execute_sync("192.168.1.100", "echo hello") + + mock_client.load_host_keys.assert_called_once_with("/custom/known_hosts") + + @patch("proxmox_mcp.ssh.paramiko.SSHClient") + def test_warning_policy_when_checking_disabled(self, mock_ssh_class, mock_config): + mock_config.PROXMOX_SSH_HOST_KEY_CHECKING = False + mock_client = MagicMock() + mock_ssh_class.return_value = mock_client + mock_client.connect.side_effect = Exception("test") + + executor = SSHExecutor(mock_config) + with pytest.raises(SSHExecutionError): + executor._execute_sync("192.168.1.100", "echo hello") + + policy_arg = mock_client.set_missing_host_key_policy.call_args[0][0] + assert isinstance(policy_arg, paramiko.WarningPolicy) + + @patch("proxmox_mcp.ssh.os.path.exists", return_value=True) + @patch("proxmox_mcp.ssh.paramiko.SSHClient") + def test_default_known_hosts_path(self, mock_ssh_class, mock_exists, mock_config): + mock_config.PROXMOX_SSH_HOST_KEY_CHECKING = True + mock_config.PROXMOX_SSH_KNOWN_HOSTS = "" # Empty = use default + mock_client = MagicMock() + mock_ssh_class.return_value = mock_client + mock_client.connect.side_effect = Exception("test") + + executor = SSHExecutor(mock_config) + with pytest.raises(SSHExecutionError): + executor._execute_sync("192.168.1.100", "echo hello") + + 
# Should load from ~/.ssh/known_hosts + loaded_path = mock_client.load_host_keys.call_args[0][0] + assert loaded_path.endswith(".ssh/known_hosts") diff --git a/tests/test_storage_tools.py b/tests/test_storage_tools.py index 1d392fb..6797b7f 100644 --- a/tests/test_storage_tools.py +++ b/tests/test_storage_tools.py @@ -1,7 +1,8 @@ """Tests for storage tools.""" +from unittest.mock import AsyncMock, MagicMock, patch + import pytest -from unittest.mock import MagicMock, AsyncMock, patch @pytest.fixture @@ -15,6 +16,7 @@ def mock_client(): # --- list_storage --- + @pytest.mark.asyncio async def test_list_storage(mock_client): from proxmox_mcp.tools.storage import list_storage @@ -76,6 +78,7 @@ async def test_list_storage_error(mock_client): # --- get_storage_status --- + @pytest.mark.asyncio async def test_get_storage_status(mock_client): from proxmox_mcp.tools.storage import get_storage_status @@ -115,6 +118,7 @@ async def test_get_storage_status_error(mock_client): # --- list_storage_content --- + @pytest.mark.asyncio async def test_list_storage_content(mock_client): from proxmox_mcp.tools.storage import list_storage_content @@ -174,6 +178,7 @@ async def test_list_storage_content_error(mock_client): # --- get_available_isos --- + @pytest.mark.asyncio async def test_get_available_isos(mock_client): from proxmox_mcp.tools.storage import get_available_isos @@ -207,6 +212,7 @@ async def test_get_available_isos_custom_storage(mock_client): # --- get_available_templates --- + @pytest.mark.asyncio async def test_get_available_templates(mock_client): from proxmox_mcp.tools.storage import get_available_templates @@ -243,3 +249,61 @@ async def test_get_available_templates_error(mock_client): result = await get_available_templates("pve1") assert result["status"] == "error" + + +# --- download_to_storage --- + + +@pytest.mark.asyncio +async def test_download_to_storage(mock_client): + from proxmox_mcp.tools.storage import download_to_storage + + mock_client.api_call.return_value 
= "UPID:pve1:00001:download" + mock_client.is_dry_run = False + + with patch("proxmox_mcp.tools.storage.get_client", return_value=mock_client): + result = await download_to_storage( + node="pve1", + storage="local", + url="https://example.com/ubuntu.iso", + content="iso", + filename="ubuntu.iso", + ) + + assert result["status"] == "submitted" + + +@pytest.mark.asyncio +async def test_download_to_storage_invalid_content(mock_client): + from proxmox_mcp.tools.storage import download_to_storage + + with patch("proxmox_mcp.tools.storage.get_client", return_value=mock_client): + result = await download_to_storage( + node="pve1", + storage="local", + url="https://example.com/file.tar", + content="backup", + filename="file.tar", + ) + + assert result["status"] == "error" + assert "iso" in result["message"] + + +@pytest.mark.asyncio +async def test_download_to_storage_dry_run(mock_client): + from proxmox_mcp.tools.storage import download_to_storage + + mock_client.is_dry_run = True + mock_client.dry_run_response.return_value = {"status": "dry_run"} + + with patch("proxmox_mcp.tools.storage.get_client", return_value=mock_client): + result = await download_to_storage( + node="pve1", + storage="local", + url="https://example.com/ubuntu.iso", + content="iso", + filename="ubuntu.iso", + ) + + assert result["status"] == "dry_run" diff --git a/tests/test_task_tools.py b/tests/test_task_tools.py index e21e59b..5d6c28e 100644 --- a/tests/test_task_tools.py +++ b/tests/test_task_tools.py @@ -1,7 +1,8 @@ """Tests for task tracking tools.""" +from unittest.mock import AsyncMock, MagicMock, patch + import pytest -from unittest.mock import MagicMock, AsyncMock, patch @pytest.fixture @@ -15,6 +16,7 @@ def mock_client(): # --- list_tasks --- + @pytest.mark.asyncio async def test_list_tasks_single_node(mock_client): from proxmox_mcp.tools.task import list_tasks @@ -137,6 +139,7 @@ async def test_list_tasks_error(mock_client): # --- get_task_status --- + @pytest.mark.asyncio async def 
test_get_task_status(mock_client): from proxmox_mcp.tools.task import get_task_status @@ -191,6 +194,7 @@ async def test_get_task_status_error(mock_client): # --- get_task_log --- + @pytest.mark.asyncio async def test_get_task_log(mock_client): from proxmox_mcp.tools.task import get_task_log @@ -238,6 +242,7 @@ async def test_get_task_log_error(mock_client): # --- wait_for_task --- + @pytest.mark.asyncio async def test_wait_for_task_immediate_complete(mock_client): from proxmox_mcp.tools.task import wait_for_task diff --git a/tests/test_validators.py b/tests/test_validators.py index fc3dd8f..b4b74c6 100644 --- a/tests/test_validators.py +++ b/tests/test_validators.py @@ -1,6 +1,7 @@ import pytest -from proxmox_mcp.utils.validators import validate_vmid, validate_node_name + from proxmox_mcp.utils.errors import InvalidParameterError +from proxmox_mcp.utils.validators import validate_node_name, validate_vmid def test_validate_vmid_valid(): diff --git a/tests/test_vm_tools.py b/tests/test_vm_tools.py index 2060a1e..c7a2bca 100644 --- a/tests/test_vm_tools.py +++ b/tests/test_vm_tools.py @@ -1,7 +1,8 @@ """Tests for VM tools.""" +from unittest.mock import AsyncMock, MagicMock, patch + import pytest -from unittest.mock import MagicMock, patch, AsyncMock @pytest.fixture @@ -13,6 +14,7 @@ def mock_client(): client.config.PROXMOX_DRY_RUN = False client.is_dry_run = False client.resolve_node_for_vmid = AsyncMock(return_value="pve1") + client.resolve_node = AsyncMock(return_value="pve1") mock_get.return_value = client yield client @@ -20,11 +22,24 @@ def mock_client(): @pytest.mark.asyncio async def test_list_vms(mock_client): from proxmox_mcp.tools.vm import list_vms - mock_client.api_call = AsyncMock(return_value=[ - {"vmid": 100, "name": "vm1", "status": "running", "node": "pve1", - "type": "qemu", "maxcpu": 2, "maxmem": 2147483648, "mem": 1073741824, - "maxdisk": 34359738368, "uptime": 3600, "cpu": 0.05}, - ]) + + mock_client.api_call = AsyncMock( + return_value=[ + { + 
"vmid": 100, + "name": "vm1", + "status": "running", + "node": "pve1", + "type": "qemu", + "maxcpu": 2, + "maxmem": 2147483648, + "mem": 1073741824, + "maxdisk": 34359738368, + "uptime": 3600, + "cpu": 0.05, + }, + ] + ) result = await list_vms() assert result["status"] == "success" assert len(result["vms"]) == 1 @@ -34,12 +49,35 @@ async def test_list_vms(mock_client): @pytest.mark.asyncio async def test_list_vms_filter_running(mock_client): from proxmox_mcp.tools.vm import list_vms - mock_client.api_call = AsyncMock(return_value=[ - {"vmid": 100, "status": "running", "name": "a", "type": "qemu", - "maxcpu": 1, "maxmem": 0, "mem": 0, "maxdisk": 0, "uptime": 0, "cpu": 0}, - {"vmid": 101, "status": "stopped", "name": "b", "type": "qemu", - "maxcpu": 1, "maxmem": 0, "mem": 0, "maxdisk": 0, "uptime": 0, "cpu": 0}, - ]) + + mock_client.api_call = AsyncMock( + return_value=[ + { + "vmid": 100, + "status": "running", + "name": "a", + "type": "qemu", + "maxcpu": 1, + "maxmem": 0, + "mem": 0, + "maxdisk": 0, + "uptime": 0, + "cpu": 0, + }, + { + "vmid": 101, + "status": "stopped", + "name": "b", + "type": "qemu", + "maxcpu": 1, + "maxmem": 0, + "mem": 0, + "maxdisk": 0, + "uptime": 0, + "cpu": 0, + }, + ] + ) result = await list_vms(status_filter="running") assert len(result["vms"]) == 1 @@ -47,10 +85,18 @@ async def test_list_vms_filter_running(mock_client): @pytest.mark.asyncio async def test_get_vm_status(mock_client): from proxmox_mcp.tools.vm import get_vm_status - mock_client.api_call = AsyncMock(return_value={ - "status": "running", "vmid": 100, "name": "test", "qmpstatus": "running", - "cpu": 0.1, "maxmem": 2147483648, "mem": 1073741824, - }) + + mock_client.api_call = AsyncMock( + return_value={ + "status": "running", + "vmid": 100, + "name": "test", + "qmpstatus": "running", + "cpu": 0.1, + "maxmem": 2147483648, + "mem": 1073741824, + } + ) result = await get_vm_status(vmid=100) assert result["status"] == "success" @@ -58,17 +104,24 @@ async def 
test_get_vm_status(mock_client): @pytest.mark.asyncio async def test_get_vm_status_auto_detect_node(mock_client): from proxmox_mcp.tools.vm import get_vm_status + mock_client.api_call = AsyncMock(return_value={"status": "running", "vmid": 100}) await get_vm_status(vmid=100) - mock_client.resolve_node_for_vmid.assert_called_once_with(100) + mock_client.resolve_node.assert_called_once_with(100, None) @pytest.mark.asyncio async def test_get_vm_config(mock_client): from proxmox_mcp.tools.vm import get_vm_config - mock_client.api_call = AsyncMock(return_value={ - "name": "test", "memory": 2048, "cores": 2, "sockets": 1, - }) + + mock_client.api_call = AsyncMock( + return_value={ + "name": "test", + "memory": 2048, + "cores": 2, + "sockets": 1, + } + ) result = await get_vm_config(vmid=100) assert result["status"] == "success" @@ -76,6 +129,7 @@ async def test_get_vm_config(mock_client): @pytest.mark.asyncio async def test_get_vm_rrd_data(mock_client): from proxmox_mcp.tools.vm import get_vm_rrd_data + mock_client.api_call = AsyncMock(return_value=[{"time": 1000, "cpu": 0.1}]) result = await get_vm_rrd_data(vmid=100, timeframe="hour") assert result["status"] == "success" @@ -84,6 +138,7 @@ async def test_get_vm_rrd_data(mock_client): @pytest.mark.asyncio async def test_start_vm(mock_client): from proxmox_mcp.tools.vm import start_vm + mock_client.api_call = AsyncMock(return_value="UPID:pve1:00001:start") result = await start_vm(vmid=100) assert result["status"] == "submitted" @@ -92,6 +147,7 @@ async def test_start_vm(mock_client): @pytest.mark.asyncio async def test_stop_vm(mock_client): from proxmox_mcp.tools.vm import stop_vm + mock_client.api_call = AsyncMock(return_value="UPID:pve1:00002:stop") result = await stop_vm(vmid=100) assert result["status"] == "submitted" @@ -101,6 +157,7 @@ async def test_stop_vm(mock_client): async def test_stop_vm_protected(mock_client): from proxmox_mcp.tools.vm import stop_vm from proxmox_mcp.utils.errors import ProtectedResourceError 
+ mock_client.check_protected.side_effect = ProtectedResourceError("protected") result = await stop_vm(vmid=100) assert result["status"] == "error" @@ -109,9 +166,13 @@ async def test_stop_vm_protected(mock_client): @pytest.mark.asyncio async def test_stop_vm_dry_run(mock_client): from proxmox_mcp.tools.vm import stop_vm + mock_client.is_dry_run = True mock_client.dry_run_response.return_value = { - "status": "dry_run", "action": "stop_vm", "params": {}, "message": "DRY RUN" + "status": "dry_run", + "action": "stop_vm", + "params": {}, + "message": "DRY RUN", } result = await stop_vm(vmid=100) assert result["status"] == "dry_run" @@ -120,6 +181,7 @@ async def test_stop_vm_dry_run(mock_client): @pytest.mark.asyncio async def test_shutdown_vm(mock_client): from proxmox_mcp.tools.vm import shutdown_vm + mock_client.api_call = AsyncMock(return_value="UPID:pve1:00003:shutdown") result = await shutdown_vm(vmid=100) assert result["status"] == "submitted" @@ -128,6 +190,7 @@ async def test_shutdown_vm(mock_client): @pytest.mark.asyncio async def test_delete_vm_requires_confirm(mock_client): from proxmox_mcp.tools.vm import delete_vm + mock_client.api_call = AsyncMock( return_value={"status": "running", "vmid": 100, "name": "test"} ) @@ -138,6 +201,7 @@ async def test_delete_vm_requires_confirm(mock_client): @pytest.mark.asyncio async def test_delete_vm_confirmed(mock_client): from proxmox_mcp.tools.vm import delete_vm + mock_client.api_call = AsyncMock(return_value="UPID:pve1:00010:destroy") result = await delete_vm(vmid=100, confirm=True) assert result["status"] == "submitted" @@ -146,6 +210,7 @@ async def test_delete_vm_confirmed(mock_client): @pytest.mark.asyncio async def test_clone_vm(mock_client): from proxmox_mcp.tools.vm import clone_vm + mock_client.api_call = AsyncMock(return_value="UPID:pve1:00004:clone") result = await clone_vm(vmid=100, newid=200, name="clone-vm") assert result["status"] == "submitted" @@ -154,6 +219,7 @@ async def test_clone_vm(mock_client): 
@pytest.mark.asyncio async def test_create_vm(mock_client): from proxmox_mcp.tools.vm import create_vm + mock_client.api_call = AsyncMock(return_value="UPID:pve1:00005:create") result = await create_vm(node="pve1", name="new-vm") assert result["status"] == "submitted" @@ -162,6 +228,169 @@ async def test_create_vm(mock_client): @pytest.mark.asyncio async def test_modify_vm_config(mock_client): from proxmox_mcp.tools.vm import modify_vm_config + mock_client.api_call = AsyncMock(return_value=None) result = await modify_vm_config(vmid=100, memory=4096, cores=4) assert result["status"] == "success" + + +@pytest.mark.asyncio +async def test_modify_vm_config_blocks_hookscript(mock_client): + from proxmox_mcp.tools.vm import modify_vm_config + + result = await modify_vm_config( + vmid=100, extra_config='{"hookscript": "local:snippets/evil.sh"}' + ) + assert result["status"] == "error" + assert "hookscript" in result["message"] + + +@pytest.mark.asyncio +async def test_modify_vm_config_blocks_hostpci(mock_client): + from proxmox_mcp.tools.vm import modify_vm_config + + result = await modify_vm_config(vmid=100, extra_config='{"hostpci0": "01:00.0"}') + assert result["status"] == "error" + assert "hostpci0" in result["message"] + + +@pytest.mark.asyncio +async def test_modify_vm_config_allows_safe_keys(mock_client): + from proxmox_mcp.tools.vm import modify_vm_config + + mock_client.api_call = AsyncMock(return_value=None) + result = await modify_vm_config( + vmid=100, extra_config='{"memory": 8192, "cores": 4, "agent": "1"}' + ) + assert result["status"] == "success" + assert "memory" in result["changes"] + assert "cores" in result["changes"] + + +@pytest.mark.asyncio +async def test_modify_vm_config_blocks_unknown_key(mock_client): + from proxmox_mcp.tools.vm import modify_vm_config + + result = await modify_vm_config(vmid=100, extra_config='{"some_unknown_key": "value"}') + assert result["status"] == "error" + assert "some_unknown_key" in result["message"] + + 
+@pytest.mark.asyncio +async def test_resize_vm_disk(mock_client): + from proxmox_mcp.tools.vm import resize_vm_disk + + mock_client.api_call = AsyncMock(return_value=None) + result = await resize_vm_disk(vmid=100, disk="scsi0", size="+10G") + assert result["status"] == "success" + assert result["disk"] == "scsi0" + assert result["size"] == "+10G" + + +@pytest.mark.asyncio +async def test_resize_vm_disk_protected(mock_client): + from proxmox_mcp.tools.vm import resize_vm_disk + from proxmox_mcp.utils.errors import ProtectedResourceError + + mock_client.check_protected.side_effect = ProtectedResourceError("protected") + result = await resize_vm_disk(vmid=100, disk="scsi0", size="+10G") + assert result["status"] == "error" + + +@pytest.mark.asyncio +async def test_resize_vm_disk_dry_run(mock_client): + from proxmox_mcp.tools.vm import resize_vm_disk + + mock_client.is_dry_run = True + mock_client.dry_run_response.return_value = {"status": "dry_run"} + result = await resize_vm_disk(vmid=100, disk="scsi0", size="+10G") + assert result["status"] == "dry_run" + + +@pytest.mark.asyncio +async def test_convert_vm_to_template_requires_confirm(mock_client): + from proxmox_mcp.tools.vm import convert_vm_to_template + + result = await convert_vm_to_template(vmid=100) + assert result["status"] == "confirmation_required" + + +@pytest.mark.asyncio +async def test_convert_vm_to_template_confirmed(mock_client): + from proxmox_mcp.tools.vm import convert_vm_to_template + + mock_client.api_call = AsyncMock(return_value=None) + result = await convert_vm_to_template(vmid=100, confirm=True) + assert result["status"] == "success" + assert "template" in result["message"] + + +@pytest.mark.asyncio +async def test_convert_vm_to_template_protected(mock_client): + from proxmox_mcp.tools.vm import convert_vm_to_template + from proxmox_mcp.utils.errors import ProtectedResourceError + + mock_client.check_protected.side_effect = ProtectedResourceError("protected") + result = await 
convert_vm_to_template(vmid=100, confirm=True) + assert result["status"] == "error" + + +@pytest.mark.asyncio +async def test_start_vm_protected(mock_client): + from proxmox_mcp.tools.vm import start_vm + from proxmox_mcp.utils.errors import ProtectedResourceError + + mock_client.check_protected.side_effect = ProtectedResourceError("protected") + result = await start_vm(vmid=100) + assert result["status"] == "error" + + +@pytest.mark.asyncio +async def test_set_vm_cloudinit(mock_client): + from proxmox_mcp.tools.vm import set_vm_cloudinit + + mock_client.api_call = AsyncMock(return_value=None) + result = await set_vm_cloudinit( + vmid=100, ciuser="admin", ipconfig0="ip=dhcp" + ) + assert result["status"] == "success" + assert "ciuser" in result["changes"] + assert "ipconfig0" in result["changes"] + + +@pytest.mark.asyncio +async def test_set_vm_cloudinit_no_changes(mock_client): + from proxmox_mcp.tools.vm import set_vm_cloudinit + + result = await set_vm_cloudinit(vmid=100) + assert result["status"] == "error" + + +@pytest.mark.asyncio +async def test_set_vm_cloudinit_dry_run(mock_client): + from proxmox_mcp.tools.vm import set_vm_cloudinit + + mock_client.is_dry_run = True + mock_client.dry_run_response.return_value = {"status": "dry_run"} + result = await set_vm_cloudinit(vmid=100, ciuser="admin") + assert result["status"] == "dry_run" + + +@pytest.mark.asyncio +async def test_regenerate_cloudinit_image(mock_client): + from proxmox_mcp.tools.vm import regenerate_cloudinit_image + + mock_client.api_call = AsyncMock(return_value=None) + result = await regenerate_cloudinit_image(vmid=100) + assert result["status"] == "success" + assert "regenerated" in result["message"] + + +@pytest.mark.asyncio +async def test_regenerate_cloudinit_image_protected(mock_client): + from proxmox_mcp.tools.vm import regenerate_cloudinit_image + from proxmox_mcp.utils.errors import ProtectedResourceError + + mock_client.check_protected.side_effect = ProtectedResourceError("protected") + 
result = await regenerate_cloudinit_image(vmid=100) + assert result["status"] == "error" diff --git a/uv.lock b/uv.lock index 9aa8278..2d5dd8e 100644 --- a/uv.lock +++ b/uv.lock @@ -42,6 +42,76 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, ] +[[package]] +name = "bcrypt" +version = "5.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d4/36/3329e2518d70ad8e2e5817d5a4cac6bba05a47767ec416c7d020a965f408/bcrypt-5.0.0.tar.gz", hash = "sha256:f748f7c2d6fd375cc93d3fba7ef4a9e3a092421b8dbf34d8d4dc06be9492dfdd", size = 25386, upload-time = "2025-09-25T19:50:47.829Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/85/3e65e01985fddf25b64ca67275bb5bdb4040bd1a53b66d355c6c37c8a680/bcrypt-5.0.0-cp313-cp313t-macosx_10_12_universal2.whl", hash = "sha256:f3c08197f3039bec79cee59a606d62b96b16669cff3949f21e74796b6e3cd2be", size = 481806, upload-time = "2025-09-25T19:49:05.102Z" }, + { url = "https://files.pythonhosted.org/packages/44/dc/01eb79f12b177017a726cbf78330eb0eb442fae0e7b3dfd84ea2849552f3/bcrypt-5.0.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:200af71bc25f22006f4069060c88ed36f8aa4ff7f53e67ff04d2ab3f1e79a5b2", size = 268626, upload-time = "2025-09-25T19:49:06.723Z" }, + { url = "https://files.pythonhosted.org/packages/8c/cf/e82388ad5959c40d6afd94fb4743cc077129d45b952d46bdc3180310e2df/bcrypt-5.0.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:baade0a5657654c2984468efb7d6c110db87ea63ef5a4b54732e7e337253e44f", size = 271853, upload-time = "2025-09-25T19:49:08.028Z" }, + { url = 
"https://files.pythonhosted.org/packages/ec/86/7134b9dae7cf0efa85671651341f6afa695857fae172615e960fb6a466fa/bcrypt-5.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:c58b56cdfb03202b3bcc9fd8daee8e8e9b6d7e3163aa97c631dfcfcc24d36c86", size = 269793, upload-time = "2025-09-25T19:49:09.727Z" }, + { url = "https://files.pythonhosted.org/packages/cc/82/6296688ac1b9e503d034e7d0614d56e80c5d1a08402ff856a4549cb59207/bcrypt-5.0.0-cp313-cp313t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:4bfd2a34de661f34d0bda43c3e4e79df586e4716ef401fe31ea39d69d581ef23", size = 289930, upload-time = "2025-09-25T19:49:11.204Z" }, + { url = "https://files.pythonhosted.org/packages/d1/18/884a44aa47f2a3b88dd09bc05a1e40b57878ecd111d17e5bba6f09f8bb77/bcrypt-5.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:ed2e1365e31fc73f1825fa830f1c8f8917ca1b3ca6185773b349c20fd606cec2", size = 272194, upload-time = "2025-09-25T19:49:12.524Z" }, + { url = "https://files.pythonhosted.org/packages/0e/8f/371a3ab33c6982070b674f1788e05b656cfbf5685894acbfef0c65483a59/bcrypt-5.0.0-cp313-cp313t-manylinux_2_34_aarch64.whl", hash = "sha256:83e787d7a84dbbfba6f250dd7a5efd689e935f03dd83b0f919d39349e1f23f83", size = 269381, upload-time = "2025-09-25T19:49:14.308Z" }, + { url = "https://files.pythonhosted.org/packages/b1/34/7e4e6abb7a8778db6422e88b1f06eb07c47682313997ee8a8f9352e5a6f1/bcrypt-5.0.0-cp313-cp313t-manylinux_2_34_x86_64.whl", hash = "sha256:137c5156524328a24b9fac1cb5db0ba618bc97d11970b39184c1d87dc4bf1746", size = 271750, upload-time = "2025-09-25T19:49:15.584Z" }, + { url = "https://files.pythonhosted.org/packages/c0/1b/54f416be2499bd72123c70d98d36c6cd61a4e33d9b89562c22481c81bb30/bcrypt-5.0.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:38cac74101777a6a7d3b3e3cfefa57089b5ada650dce2baf0cbdd9d65db22a9e", size = 303757, upload-time = "2025-09-25T19:49:17.244Z" }, + { url = 
"https://files.pythonhosted.org/packages/13/62/062c24c7bcf9d2826a1a843d0d605c65a755bc98002923d01fd61270705a/bcrypt-5.0.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:d8d65b564ec849643d9f7ea05c6d9f0cd7ca23bdd4ac0c2dbef1104ab504543d", size = 306740, upload-time = "2025-09-25T19:49:18.693Z" }, + { url = "https://files.pythonhosted.org/packages/d5/c8/1fdbfc8c0f20875b6b4020f3c7dc447b8de60aa0be5faaf009d24242aec9/bcrypt-5.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:741449132f64b3524e95cd30e5cd3343006ce146088f074f31ab26b94e6c75ba", size = 334197, upload-time = "2025-09-25T19:49:20.523Z" }, + { url = "https://files.pythonhosted.org/packages/a6/c1/8b84545382d75bef226fbc6588af0f7b7d095f7cd6a670b42a86243183cd/bcrypt-5.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:212139484ab3207b1f0c00633d3be92fef3c5f0af17cad155679d03ff2ee1e41", size = 352974, upload-time = "2025-09-25T19:49:22.254Z" }, + { url = "https://files.pythonhosted.org/packages/10/a6/ffb49d4254ed085e62e3e5dd05982b4393e32fe1e49bb1130186617c29cd/bcrypt-5.0.0-cp313-cp313t-win32.whl", hash = "sha256:9d52ed507c2488eddd6a95bccee4e808d3234fa78dd370e24bac65a21212b861", size = 148498, upload-time = "2025-09-25T19:49:24.134Z" }, + { url = "https://files.pythonhosted.org/packages/48/a9/259559edc85258b6d5fc5471a62a3299a6aa37a6611a169756bf4689323c/bcrypt-5.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f6984a24db30548fd39a44360532898c33528b74aedf81c26cf29c51ee47057e", size = 145853, upload-time = "2025-09-25T19:49:25.702Z" }, + { url = "https://files.pythonhosted.org/packages/2d/df/9714173403c7e8b245acf8e4be8876aac64a209d1b392af457c79e60492e/bcrypt-5.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:9fffdb387abe6aa775af36ef16f55e318dcda4194ddbf82007a6f21da29de8f5", size = 139626, upload-time = "2025-09-25T19:49:26.928Z" }, + { url = "https://files.pythonhosted.org/packages/f8/14/c18006f91816606a4abe294ccc5d1e6f0e42304df5a33710e9e8e95416e1/bcrypt-5.0.0-cp314-cp314t-macosx_10_12_universal2.whl", 
hash = "sha256:4870a52610537037adb382444fefd3706d96d663ac44cbb2f37e3919dca3d7ef", size = 481862, upload-time = "2025-09-25T19:49:28.365Z" }, + { url = "https://files.pythonhosted.org/packages/67/49/dd074d831f00e589537e07a0725cf0e220d1f0d5d8e85ad5bbff251c45aa/bcrypt-5.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:48f753100931605686f74e27a7b49238122aa761a9aefe9373265b8b7aa43ea4", size = 268544, upload-time = "2025-09-25T19:49:30.39Z" }, + { url = "https://files.pythonhosted.org/packages/f5/91/50ccba088b8c474545b034a1424d05195d9fcbaaf802ab8bfe2be5a4e0d7/bcrypt-5.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f70aadb7a809305226daedf75d90379c397b094755a710d7014b8b117df1ebbf", size = 271787, upload-time = "2025-09-25T19:49:32.144Z" }, + { url = "https://files.pythonhosted.org/packages/aa/e7/d7dba133e02abcda3b52087a7eea8c0d4f64d3e593b4fffc10c31b7061f3/bcrypt-5.0.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:744d3c6b164caa658adcb72cb8cc9ad9b4b75c7db507ab4bc2480474a51989da", size = 269753, upload-time = "2025-09-25T19:49:33.885Z" }, + { url = "https://files.pythonhosted.org/packages/33/fc/5b145673c4b8d01018307b5c2c1fc87a6f5a436f0ad56607aee389de8ee3/bcrypt-5.0.0-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a28bc05039bdf3289d757f49d616ab3efe8cf40d8e8001ccdd621cd4f98f4fc9", size = 289587, upload-time = "2025-09-25T19:49:35.144Z" }, + { url = "https://files.pythonhosted.org/packages/27/d7/1ff22703ec6d4f90e62f1a5654b8867ef96bafb8e8102c2288333e1a6ca6/bcrypt-5.0.0-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:7f277a4b3390ab4bebe597800a90da0edae882c6196d3038a73adf446c4f969f", size = 272178, upload-time = "2025-09-25T19:49:36.793Z" }, + { url = "https://files.pythonhosted.org/packages/c8/88/815b6d558a1e4d40ece04a2f84865b0fef233513bd85fd0e40c294272d62/bcrypt-5.0.0-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = 
"sha256:79cfa161eda8d2ddf29acad370356b47f02387153b11d46042e93a0a95127493", size = 269295, upload-time = "2025-09-25T19:49:38.164Z" }, + { url = "https://files.pythonhosted.org/packages/51/8c/e0db387c79ab4931fc89827d37608c31cc57b6edc08ccd2386139028dc0d/bcrypt-5.0.0-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a5393eae5722bcef046a990b84dff02b954904c36a194f6cfc817d7dca6c6f0b", size = 271700, upload-time = "2025-09-25T19:49:39.917Z" }, + { url = "https://files.pythonhosted.org/packages/06/83/1570edddd150f572dbe9fc00f6203a89fc7d4226821f67328a85c330f239/bcrypt-5.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7f4c94dec1b5ab5d522750cb059bb9409ea8872d4494fd152b53cca99f1ddd8c", size = 334034, upload-time = "2025-09-25T19:49:41.227Z" }, + { url = "https://files.pythonhosted.org/packages/c9/f2/ea64e51a65e56ae7a8a4ec236c2bfbdd4b23008abd50ac33fbb2d1d15424/bcrypt-5.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0cae4cb350934dfd74c020525eeae0a5f79257e8a201c0c176f4b84fdbf2a4b4", size = 352766, upload-time = "2025-09-25T19:49:43.08Z" }, + { url = "https://files.pythonhosted.org/packages/d7/d4/1a388d21ee66876f27d1a1f41287897d0c0f1712ef97d395d708ba93004c/bcrypt-5.0.0-cp314-cp314t-win32.whl", hash = "sha256:b17366316c654e1ad0306a6858e189fc835eca39f7eb2cafd6aaca8ce0c40a2e", size = 152449, upload-time = "2025-09-25T19:49:44.971Z" }, + { url = "https://files.pythonhosted.org/packages/3f/61/3291c2243ae0229e5bca5d19f4032cecad5dfb05a2557169d3a69dc0ba91/bcrypt-5.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:92864f54fb48b4c718fc92a32825d0e42265a627f956bc0361fe869f1adc3e7d", size = 149310, upload-time = "2025-09-25T19:49:46.162Z" }, + { url = "https://files.pythonhosted.org/packages/3e/89/4b01c52ae0c1a681d4021e5dd3e45b111a8fb47254a274fa9a378d8d834b/bcrypt-5.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:dd19cf5184a90c873009244586396a6a884d591a5323f0e8a5922560718d4993", size = 143761, upload-time = "2025-09-25T19:49:47.345Z" }, + { url = 
"https://files.pythonhosted.org/packages/84/29/6237f151fbfe295fe3e074ecc6d44228faa1e842a81f6d34a02937ee1736/bcrypt-5.0.0-cp38-abi3-macosx_10_12_universal2.whl", hash = "sha256:fc746432b951e92b58317af8e0ca746efe93e66555f1b40888865ef5bf56446b", size = 494553, upload-time = "2025-09-25T19:49:49.006Z" }, + { url = "https://files.pythonhosted.org/packages/45/b6/4c1205dde5e464ea3bd88e8742e19f899c16fa8916fb8510a851fae985b5/bcrypt-5.0.0-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c2388ca94ffee269b6038d48747f4ce8df0ffbea43f31abfa18ac72f0218effb", size = 275009, upload-time = "2025-09-25T19:49:50.581Z" }, + { url = "https://files.pythonhosted.org/packages/3b/71/427945e6ead72ccffe77894b2655b695ccf14ae1866cd977e185d606dd2f/bcrypt-5.0.0-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:560ddb6ec730386e7b3b26b8b4c88197aaed924430e7b74666a586ac997249ef", size = 278029, upload-time = "2025-09-25T19:49:52.533Z" }, + { url = "https://files.pythonhosted.org/packages/17/72/c344825e3b83c5389a369c8a8e58ffe1480b8a699f46c127c34580c4666b/bcrypt-5.0.0-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d79e5c65dcc9af213594d6f7f1fa2c98ad3fc10431e7aa53c176b441943efbdd", size = 275907, upload-time = "2025-09-25T19:49:54.709Z" }, + { url = "https://files.pythonhosted.org/packages/0b/7e/d4e47d2df1641a36d1212e5c0514f5291e1a956a7749f1e595c07a972038/bcrypt-5.0.0-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2b732e7d388fa22d48920baa267ba5d97cca38070b69c0e2d37087b381c681fd", size = 296500, upload-time = "2025-09-25T19:49:56.013Z" }, + { url = "https://files.pythonhosted.org/packages/0f/c3/0ae57a68be2039287ec28bc463b82e4b8dc23f9d12c0be331f4782e19108/bcrypt-5.0.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0c8e093ea2532601a6f686edbc2c6b2ec24131ff5c52f7610dd64fa4553b5464", size = 278412, upload-time = "2025-09-25T19:49:57.356Z" }, + { url = 
"https://files.pythonhosted.org/packages/45/2b/77424511adb11e6a99e3a00dcc7745034bee89036ad7d7e255a7e47be7d8/bcrypt-5.0.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:5b1589f4839a0899c146e8892efe320c0fa096568abd9b95593efac50a87cb75", size = 275486, upload-time = "2025-09-25T19:49:59.116Z" }, + { url = "https://files.pythonhosted.org/packages/43/0a/405c753f6158e0f3f14b00b462d8bca31296f7ecfc8fc8bc7919c0c7d73a/bcrypt-5.0.0-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:89042e61b5e808b67daf24a434d89bab164d4de1746b37a8d173b6b14f3db9ff", size = 277940, upload-time = "2025-09-25T19:50:00.869Z" }, + { url = "https://files.pythonhosted.org/packages/62/83/b3efc285d4aadc1fa83db385ec64dcfa1707e890eb42f03b127d66ac1b7b/bcrypt-5.0.0-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:e3cf5b2560c7b5a142286f69bde914494b6d8f901aaa71e453078388a50881c4", size = 310776, upload-time = "2025-09-25T19:50:02.393Z" }, + { url = "https://files.pythonhosted.org/packages/95/7d/47ee337dacecde6d234890fe929936cb03ebc4c3a7460854bbd9c97780b8/bcrypt-5.0.0-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f632fd56fc4e61564f78b46a2269153122db34988e78b6be8b32d28507b7eaeb", size = 312922, upload-time = "2025-09-25T19:50:04.232Z" }, + { url = "https://files.pythonhosted.org/packages/d6/3a/43d494dfb728f55f4e1cf8fd435d50c16a2d75493225b54c8d06122523c6/bcrypt-5.0.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:801cad5ccb6b87d1b430f183269b94c24f248dddbbc5c1f78b6ed231743e001c", size = 341367, upload-time = "2025-09-25T19:50:05.559Z" }, + { url = "https://files.pythonhosted.org/packages/55/ab/a0727a4547e383e2e22a630e0f908113db37904f58719dc48d4622139b5c/bcrypt-5.0.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3cf67a804fc66fc217e6914a5635000259fbbbb12e78a99488e4d5ba445a71eb", size = 359187, upload-time = "2025-09-25T19:50:06.916Z" }, + { url = 
"https://files.pythonhosted.org/packages/1b/bb/461f352fdca663524b4643d8b09e8435b4990f17fbf4fea6bc2a90aa0cc7/bcrypt-5.0.0-cp38-abi3-win32.whl", hash = "sha256:3abeb543874b2c0524ff40c57a4e14e5d3a66ff33fb423529c88f180fd756538", size = 153752, upload-time = "2025-09-25T19:50:08.515Z" }, + { url = "https://files.pythonhosted.org/packages/41/aa/4190e60921927b7056820291f56fc57d00d04757c8b316b2d3c0d1d6da2c/bcrypt-5.0.0-cp38-abi3-win_amd64.whl", hash = "sha256:35a77ec55b541e5e583eb3436ffbbf53b0ffa1fa16ca6782279daf95d146dcd9", size = 150881, upload-time = "2025-09-25T19:50:09.742Z" }, + { url = "https://files.pythonhosted.org/packages/54/12/cd77221719d0b39ac0b55dbd39358db1cd1246e0282e104366ebbfb8266a/bcrypt-5.0.0-cp38-abi3-win_arm64.whl", hash = "sha256:cde08734f12c6a4e28dc6755cd11d3bdfea608d93d958fffbe95a7026ebe4980", size = 144931, upload-time = "2025-09-25T19:50:11.016Z" }, + { url = "https://files.pythonhosted.org/packages/5d/ba/2af136406e1c3839aea9ecadc2f6be2bcd1eff255bd451dd39bcf302c47a/bcrypt-5.0.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0c418ca99fd47e9c59a301744d63328f17798b5947b0f791e9af3c1c499c2d0a", size = 495313, upload-time = "2025-09-25T19:50:12.309Z" }, + { url = "https://files.pythonhosted.org/packages/ac/ee/2f4985dbad090ace5ad1f7dd8ff94477fe089b5fab2040bd784a3d5f187b/bcrypt-5.0.0-cp39-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddb4e1500f6efdd402218ffe34d040a1196c072e07929b9820f363a1fd1f4191", size = 275290, upload-time = "2025-09-25T19:50:13.673Z" }, + { url = "https://files.pythonhosted.org/packages/e4/6e/b77ade812672d15cf50842e167eead80ac3514f3beacac8902915417f8b7/bcrypt-5.0.0-cp39-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7aeef54b60ceddb6f30ee3db090351ecf0d40ec6e2abf41430997407a46d2254", size = 278253, upload-time = "2025-09-25T19:50:15.089Z" }, + { url = 
"https://files.pythonhosted.org/packages/36/c4/ed00ed32f1040f7990dac7115f82273e3c03da1e1a1587a778d8cea496d8/bcrypt-5.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f0ce778135f60799d89c9693b9b398819d15f1921ba15fe719acb3178215a7db", size = 276084, upload-time = "2025-09-25T19:50:16.699Z" }, + { url = "https://files.pythonhosted.org/packages/e7/c4/fa6e16145e145e87f1fa351bbd54b429354fd72145cd3d4e0c5157cf4c70/bcrypt-5.0.0-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a71f70ee269671460b37a449f5ff26982a6f2ba493b3eabdd687b4bf35f875ac", size = 297185, upload-time = "2025-09-25T19:50:18.525Z" }, + { url = "https://files.pythonhosted.org/packages/24/b4/11f8a31d8b67cca3371e046db49baa7c0594d71eb40ac8121e2fc0888db0/bcrypt-5.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f8429e1c410b4073944f03bd778a9e066e7fad723564a52ff91841d278dfc822", size = 278656, upload-time = "2025-09-25T19:50:19.809Z" }, + { url = "https://files.pythonhosted.org/packages/ac/31/79f11865f8078e192847d2cb526e3fa27c200933c982c5b2869720fa5fce/bcrypt-5.0.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:edfcdcedd0d0f05850c52ba3127b1fce70b9f89e0fe5ff16517df7e81fa3cbb8", size = 275662, upload-time = "2025-09-25T19:50:21.567Z" }, + { url = "https://files.pythonhosted.org/packages/d4/8d/5e43d9584b3b3591a6f9b68f755a4da879a59712981ef5ad2a0ac1379f7a/bcrypt-5.0.0-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:611f0a17aa4a25a69362dcc299fda5c8a3d4f160e2abb3831041feb77393a14a", size = 278240, upload-time = "2025-09-25T19:50:23.305Z" }, + { url = "https://files.pythonhosted.org/packages/89/48/44590e3fc158620f680a978aafe8f87a4c4320da81ed11552f0323aa9a57/bcrypt-5.0.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:db99dca3b1fdc3db87d7c57eac0c82281242d1eabf19dcb8a6b10eb29a2e72d1", size = 311152, upload-time = "2025-09-25T19:50:24.597Z" }, + { url = 
"https://files.pythonhosted.org/packages/5f/85/e4fbfc46f14f47b0d20493669a625da5827d07e8a88ee460af6cd9768b44/bcrypt-5.0.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:5feebf85a9cefda32966d8171f5db7e3ba964b77fdfe31919622256f80f9cf42", size = 313284, upload-time = "2025-09-25T19:50:26.268Z" }, + { url = "https://files.pythonhosted.org/packages/25/ae/479f81d3f4594456a01ea2f05b132a519eff9ab5768a70430fa1132384b1/bcrypt-5.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3ca8a166b1140436e058298a34d88032ab62f15aae1c598580333dc21d27ef10", size = 341643, upload-time = "2025-09-25T19:50:28.02Z" }, + { url = "https://files.pythonhosted.org/packages/df/d2/36a086dee1473b14276cd6ea7f61aef3b2648710b5d7f1c9e032c29b859f/bcrypt-5.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:61afc381250c3182d9078551e3ac3a41da14154fbff647ddf52a769f588c4172", size = 359698, upload-time = "2025-09-25T19:50:31.347Z" }, + { url = "https://files.pythonhosted.org/packages/c0/f6/688d2cd64bfd0b14d805ddb8a565e11ca1fb0fd6817175d58b10052b6d88/bcrypt-5.0.0-cp39-abi3-win32.whl", hash = "sha256:64d7ce196203e468c457c37ec22390f1a61c85c6f0b8160fd752940ccfb3a683", size = 153725, upload-time = "2025-09-25T19:50:34.384Z" }, + { url = "https://files.pythonhosted.org/packages/9f/b9/9d9a641194a730bda138b3dfe53f584d61c58cd5230e37566e83ec2ffa0d/bcrypt-5.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:64ee8434b0da054d830fa8e89e1c8bf30061d539044a39524ff7dec90481e5c2", size = 150912, upload-time = "2025-09-25T19:50:35.69Z" }, + { url = "https://files.pythonhosted.org/packages/27/44/d2ef5e87509158ad2187f4dd0852df80695bb1ee0cfe0a684727b01a69e0/bcrypt-5.0.0-cp39-abi3-win_arm64.whl", hash = "sha256:f2347d3534e76bf50bca5500989d6c1d05ed64b440408057a37673282c654927", size = 144953, upload-time = "2025-09-25T19:50:37.32Z" }, + { url = "https://files.pythonhosted.org/packages/8a/75/4aa9f5a4d40d762892066ba1046000b329c7cd58e888a6db878019b282dc/bcrypt-5.0.0-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = 
"sha256:7edda91d5ab52b15636d9c30da87d2cc84f426c72b9dba7a9b4fe142ba11f534", size = 271180, upload-time = "2025-09-25T19:50:38.575Z" }, + { url = "https://files.pythonhosted.org/packages/54/79/875f9558179573d40a9cc743038ac2bf67dfb79cecb1e8b5d70e88c94c3d/bcrypt-5.0.0-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:046ad6db88edb3c5ece4369af997938fb1c19d6a699b9c1b27b0db432faae4c4", size = 273791, upload-time = "2025-09-25T19:50:39.913Z" }, + { url = "https://files.pythonhosted.org/packages/bc/fe/975adb8c216174bf70fc17535f75e85ac06ed5252ea077be10d9cff5ce24/bcrypt-5.0.0-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:dcd58e2b3a908b5ecc9b9df2f0085592506ac2d5110786018ee5e160f28e0911", size = 270746, upload-time = "2025-09-25T19:50:43.306Z" }, + { url = "https://files.pythonhosted.org/packages/e4/f8/972c96f5a2b6c4b3deca57009d93e946bbdbe2241dca9806d502f29dd3ee/bcrypt-5.0.0-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:6b8f520b61e8781efee73cba14e3e8c9556ccfb375623f4f97429544734545b4", size = 273375, upload-time = "2025-09-25T19:50:45.43Z" }, +] + [[package]] name = "certifi" version = "2026.1.4" @@ -442,6 +512,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, ] +[[package]] +name = "invoke" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/de/bd/b461d3424a24c80490313fd77feeb666ca4f6a28c7e72713e3d9095719b4/invoke-2.2.1.tar.gz", hash = "sha256:515bf49b4a48932b79b024590348da22f39c4942dff991ad1fb8b8baea1be707", size = 304762, upload-time = "2025-10-11T00:36:35.172Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/32/4b/b99e37f88336009971405cbb7630610322ed6fbfa31e1d7ab3fbf3049a2d/invoke-2.2.1-py3-none-any.whl", hash = "sha256:2413bc441b376e5cd3f55bb5d364f973ad8bdd7bf87e53c79de3c11bf3feecc8", size = 160287, upload-time = "2025-10-11T00:36:33.703Z" }, +] + [[package]] name = "jsonschema" version = "4.26.0" @@ -651,6 +730,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" }, ] +[[package]] +name = "paramiko" +version = "4.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "bcrypt" }, + { name = "cryptography" }, + { name = "invoke" }, + { name = "pynacl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1f/e7/81fdcbc7f190cdb058cffc9431587eb289833bdd633e2002455ca9bb13d4/paramiko-4.0.0.tar.gz", hash = "sha256:6a25f07b380cc9c9a88d2b920ad37167ac4667f8d9886ccebd8f90f654b5d69f", size = 1630743, upload-time = "2025-08-04T01:02:03.711Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a9/90/a744336f5af32c433bd09af7854599682a383b37cfd78f7de263de6ad6cb/paramiko-4.0.0-py3-none-any.whl", hash = "sha256:0e20e00ac666503bf0b4eda3b6d833465a2b7aff2e2b3d79a8bba5ef144ee3b9", size = 223932, upload-time = "2025-08-04T01:02:02.029Z" }, +] + [[package]] name = "pathspec" version = "1.0.4" @@ -675,6 +769,7 @@ version = "1.0.0" source = { editable = "." 
} dependencies = [ { name = "mcp", extra = ["cli"] }, + { name = "paramiko" }, { name = "proxmoxer" }, { name = "pydantic" }, { name = "pydantic-settings" }, @@ -695,6 +790,7 @@ dev = [ requires-dist = [ { name = "mcp", extras = ["cli"], specifier = ">=1.9.0" }, { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.10" }, + { name = "paramiko", specifier = ">=3.4" }, { name = "proxmoxer", specifier = ">=2.1.0" }, { name = "pydantic", specifier = ">=2.0" }, { name = "pydantic-settings", specifier = ">=2.0" }, @@ -874,6 +970,41 @@ crypto = [ { name = "cryptography" }, ] +[[package]] +name = "pynacl" +version = "1.6.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d9/9a/4019b524b03a13438637b11538c82781a5eda427394380381af8f04f467a/pynacl-1.6.2.tar.gz", hash = "sha256:018494d6d696ae03c7e656e5e74cdfd8ea1326962cc401bcf018f1ed8436811c", size = 3511692, upload-time = "2026-01-01T17:48:10.851Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/79/0e3c34dc3c4671f67d251c07aa8eb100916f250ee470df230b0ab89551b4/pynacl-1.6.2-cp314-cp314t-macosx_10_10_universal2.whl", hash = "sha256:622d7b07cc5c02c666795792931b50c91f3ce3c2649762efb1ef0d5684c81594", size = 390064, upload-time = "2026-01-01T17:31:57.264Z" }, + { url = "https://files.pythonhosted.org/packages/eb/1c/23a26e931736e13b16483795c8a6b2f641bf6a3d5238c22b070a5112722c/pynacl-1.6.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d071c6a9a4c94d79eb665db4ce5cedc537faf74f2355e4d502591d850d3913c0", size = 809370, upload-time = "2026-01-01T17:31:59.198Z" }, + { url = "https://files.pythonhosted.org/packages/87/74/8d4b718f8a22aea9e8dcc8b95deb76d4aae380e2f5b570cc70b5fd0a852d/pynacl-1.6.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:fe9847ca47d287af41e82be1dd5e23023d3c31a951da134121ab02e42ac218c9", size = 1408304, upload-time = "2026-01-01T17:32:01.162Z" }, + { url = "https://files.pythonhosted.org/packages/fd/73/be4fdd3a6a87fe8a4553380c2b47fbd1f7f58292eb820902f5c8ac7de7b0/pynacl-1.6.2-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:04316d1fc625d860b6c162fff704eb8426b1a8bcd3abacea11142cbd99a6b574", size = 844871, upload-time = "2026-01-01T17:32:02.824Z" }, + { url = "https://files.pythonhosted.org/packages/55/ad/6efc57ab75ee4422e96b5f2697d51bbcf6cdcc091e66310df91fbdc144a8/pynacl-1.6.2-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44081faff368d6c5553ccf55322ef2819abb40e25afaec7e740f159f74813634", size = 1446356, upload-time = "2026-01-01T17:32:04.452Z" }, + { url = "https://files.pythonhosted.org/packages/78/b7/928ee9c4779caa0a915844311ab9fb5f99585621c5d6e4574538a17dca07/pynacl-1.6.2-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:a9f9932d8d2811ce1a8ffa79dcbdf3970e7355b5c8eb0c1a881a57e7f7d96e88", size = 826814, upload-time = "2026-01-01T17:32:06.078Z" }, + { url = "https://files.pythonhosted.org/packages/f7/a9/1bdba746a2be20f8809fee75c10e3159d75864ef69c6b0dd168fc60e485d/pynacl-1.6.2-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:bc4a36b28dd72fb4845e5d8f9760610588a96d5a51f01d84d8c6ff9849968c14", size = 1411742, upload-time = "2026-01-01T17:32:07.651Z" }, + { url = "https://files.pythonhosted.org/packages/f3/2f/5e7ea8d85f9f3ea5b6b87db1d8388daa3587eed181bdeb0306816fdbbe79/pynacl-1.6.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:3bffb6d0f6becacb6526f8f42adfb5efb26337056ee0831fb9a7044d1a964444", size = 801714, upload-time = "2026-01-01T17:32:09.558Z" }, + { url = "https://files.pythonhosted.org/packages/06/ea/43fe2f7eab5f200e40fb10d305bf6f87ea31b3bbc83443eac37cd34a9e1e/pynacl-1.6.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = 
"sha256:2fef529ef3ee487ad8113d287a593fa26f48ee3620d92ecc6f1d09ea38e0709b", size = 1372257, upload-time = "2026-01-01T17:32:11.026Z" }, + { url = "https://files.pythonhosted.org/packages/4d/54/c9ea116412788629b1347e415f72195c25eb2f3809b2d3e7b25f5c79f13a/pynacl-1.6.2-cp314-cp314t-win32.whl", hash = "sha256:a84bf1c20339d06dc0c85d9aea9637a24f718f375d861b2668b2f9f96fa51145", size = 231319, upload-time = "2026-01-01T17:32:12.46Z" }, + { url = "https://files.pythonhosted.org/packages/ce/04/64e9d76646abac2dccf904fccba352a86e7d172647557f35b9fe2a5ee4a1/pynacl-1.6.2-cp314-cp314t-win_amd64.whl", hash = "sha256:320ef68a41c87547c91a8b58903c9caa641ab01e8512ce291085b5fe2fcb7590", size = 244044, upload-time = "2026-01-01T17:32:13.781Z" }, + { url = "https://files.pythonhosted.org/packages/33/33/7873dc161c6a06f43cda13dec67b6fe152cb2f982581151956fa5e5cdb47/pynacl-1.6.2-cp314-cp314t-win_arm64.whl", hash = "sha256:d29bfe37e20e015a7d8b23cfc8bd6aa7909c92a1b8f41ee416bbb3e79ef182b2", size = 188740, upload-time = "2026-01-01T17:32:15.083Z" }, + { url = "https://files.pythonhosted.org/packages/be/7b/4845bbf88e94586ec47a432da4e9107e3fc3ce37eb412b1398630a37f7dd/pynacl-1.6.2-cp38-abi3-macosx_10_10_universal2.whl", hash = "sha256:c949ea47e4206af7c8f604b8278093b674f7c79ed0d4719cc836902bf4517465", size = 388458, upload-time = "2026-01-01T17:32:16.829Z" }, + { url = "https://files.pythonhosted.org/packages/1e/b4/e927e0653ba63b02a4ca5b4d852a8d1d678afbf69b3dbf9c4d0785ac905c/pynacl-1.6.2-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8845c0631c0be43abdd865511c41eab235e0be69c81dc66a50911594198679b0", size = 800020, upload-time = "2026-01-01T17:32:18.34Z" }, + { url = "https://files.pythonhosted.org/packages/7f/81/d60984052df5c97b1d24365bc1e30024379b42c4edcd79d2436b1b9806f2/pynacl-1.6.2-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:22de65bb9010a725b0dac248f353bb072969c94fa8d6b1f34b87d7953cf7bbe4", size = 1399174, upload-time = 
"2026-01-01T17:32:20.239Z" }, + { url = "https://files.pythonhosted.org/packages/68/f7/322f2f9915c4ef27d140101dd0ed26b479f7e6f5f183590fd32dfc48c4d3/pynacl-1.6.2-cp38-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:46065496ab748469cdd999246d17e301b2c24ae2fdf739132e580a0e94c94a87", size = 835085, upload-time = "2026-01-01T17:32:22.24Z" }, + { url = "https://files.pythonhosted.org/packages/3e/d0/f301f83ac8dbe53442c5a43f6a39016f94f754d7a9815a875b65e218a307/pynacl-1.6.2-cp38-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8a66d6fb6ae7661c58995f9c6435bda2b1e68b54b598a6a10247bfcdadac996c", size = 1437614, upload-time = "2026-01-01T17:32:23.766Z" }, + { url = "https://files.pythonhosted.org/packages/c4/58/fc6e649762b029315325ace1a8c6be66125e42f67416d3dbd47b69563d61/pynacl-1.6.2-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:26bfcd00dcf2cf160f122186af731ae30ab120c18e8375684ec2670dccd28130", size = 818251, upload-time = "2026-01-01T17:32:25.69Z" }, + { url = "https://files.pythonhosted.org/packages/c9/a8/b917096b1accc9acd878819a49d3d84875731a41eb665f6ebc826b1af99e/pynacl-1.6.2-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:c8a231e36ec2cab018c4ad4358c386e36eede0319a0c41fed24f840b1dac59f6", size = 1402859, upload-time = "2026-01-01T17:32:27.215Z" }, + { url = "https://files.pythonhosted.org/packages/85/42/fe60b5f4473e12c72f977548e4028156f4d340b884c635ec6b063fe7e9a5/pynacl-1.6.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:68be3a09455743ff9505491220b64440ced8973fe930f270c8e07ccfa25b1f9e", size = 791926, upload-time = "2026-01-01T17:32:29.314Z" }, + { url = "https://files.pythonhosted.org/packages/fa/f9/e40e318c604259301cc091a2a63f237d9e7b424c4851cafaea4ea7c4834e/pynacl-1.6.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:8b097553b380236d51ed11356c953bf8ce36a29a3e596e934ecabe76c985a577", size = 1363101, upload-time = "2026-01-01T17:32:31.263Z" }, + { url = 
"https://files.pythonhosted.org/packages/48/47/e761c254f410c023a469284a9bc210933e18588ca87706ae93002c05114c/pynacl-1.6.2-cp38-abi3-win32.whl", hash = "sha256:5811c72b473b2f38f7e2a3dc4f8642e3a3e9b5e7317266e4ced1fba85cae41aa", size = 227421, upload-time = "2026-01-01T17:32:33.076Z" }, + { url = "https://files.pythonhosted.org/packages/41/ad/334600e8cacc7d86587fe5f565480fde569dfb487389c8e1be56ac21d8ac/pynacl-1.6.2-cp38-abi3-win_amd64.whl", hash = "sha256:62985f233210dee6548c223301b6c25440852e13d59a8b81490203c3227c5ba0", size = 239754, upload-time = "2026-01-01T17:32:34.557Z" }, + { url = "https://files.pythonhosted.org/packages/29/7d/5945b5af29534641820d3bd7b00962abbbdfee84ec7e19f0d5b3175f9a31/pynacl-1.6.2-cp38-abi3-win_arm64.whl", hash = "sha256:834a43af110f743a754448463e8fd61259cd4ab5bbedcf70f9dabad1d28a394c", size = 184801, upload-time = "2026-01-01T17:32:36.309Z" }, +] + [[package]] name = "pytest" version = "9.0.2"