diff --git a/.agents/skills/openrtc-python/SKILL.md b/.agents/skills/openrtc-python/SKILL.md index c0eaad6..a703d75 100644 --- a/.agents/skills/openrtc-python/SKILL.md +++ b/.agents/skills/openrtc-python/SKILL.md @@ -111,12 +111,20 @@ exactly one `Agent` subclass at module scope, and the filename doesn't start with `_`. Fix and re-run `openrtc list` until all agents appear. ```bash -# Development mode (auto-reload) +# Development mode (auto-reload) — set LIVEKIT_* env vars first openrtc dev --agents-dir ./agents # Production mode openrtc start --agents-dir ./agents +# Same LiveKit subcommands as python agent.py: console, connect, download-files +# openrtc console --agents-dir ./agents +# openrtc connect --agents-dir ./agents --room my-room + +# Optional: JSON Lines metrics + sidecar TUI (pip install 'openrtc[cli,tui]') +# openrtc dev --agents-dir ./agents --metrics-jsonl ./metrics.jsonl +# openrtc tui --watch ./metrics.jsonl + # Or run the entrypoint directly python main.py dev ``` @@ -144,6 +152,10 @@ Unknown metadata names raise `ValueError` — no silent fallback. - **`pool.run()` delegates to `livekit.agents.cli.run_app()`.** The first CLI argument must be `dev` or `start` (e.g. `python main.py dev`). Without it, the process exits immediately with a usage error. +- **`openrtc dev|start|…` sets up discovery then calls the same LiveKit CLI.** + OpenRTC-only flags (`--agents-dir`, `--dashboard`, `--metrics-jsonl`, …) are + stripped from `sys.argv` before LiveKit parses arguments—do not expect LiveKit + to understand them. - **Provider objects must be pickleable.** OpenRTC has built-in serialization for `livekit.plugins.openai` STT, TTS, and LLM. Other providers: use string identifiers or ensure the object is natively pickleable. 
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3e5768b..da944fb 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -26,7 +26,7 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - python -m pip install -e ".[cli]" pytest pytest-asyncio pytest-cov + python -m pip install -e ".[cli,tui]" pytest pytest-asyncio pytest-cov - name: Run tests with coverage run: pytest --cov=openrtc --cov-report=xml --cov-fail-under=80 diff --git a/.gitignore b/.gitignore index 928ef42..623673c 100644 --- a/.gitignore +++ b/.gitignore @@ -164,6 +164,9 @@ venv.bak/ # Rope project settings .ropeproject +# Node (VitePress docs) +node_modules/ + # mkdocs documentation /site diff --git a/AGENTS.md b/AGENTS.md index 1627a71..a13daab 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -403,7 +403,7 @@ When in doubt: All commands are documented in `CONTRIBUTING.md`. Quick reference: - **Install deps:** `uv sync --group dev` -- **Tests:** `uv run pytest` (36 tests, all self-contained) +- **Tests:** `uv run pytest` (self-contained; no LiveKit server required) - **Lint:** `uv run ruff check .` - **Format check:** `uv run ruff format --check .` - **Type check:** `uv run mypy src/` (3 pre-existing errors as of this writing) @@ -414,6 +414,6 @@ All commands are documented in `CONTRIBUTING.md`. Quick reference: - The `tests/conftest.py` creates a fake `livekit.agents` module when the real one isn't importable. This allows tests to run without the full LiveKit SDK. The real SDK *is* installed by `uv sync`, but if you see import weirdness in tests, this shim is the reason. - Version is derived from git tags via `hatch-vcs`. In a dev checkout the version will be something like `0.0.9.dev0+g`. - `mypy` has 3 pre-existing errors in `pool.py` — these are not regressions from your changes. -- Running `openrtc start` or `openrtc dev` requires a running LiveKit server and provider API keys. 
For development validation, use `openrtc list` which exercises discovery and routing without network dependencies. +- Running `openrtc start` or `openrtc dev` requires a running LiveKit server and provider API keys. For development validation, use `openrtc list` which exercises discovery and routing without network dependencies. The optional sidecar metrics TUI (`openrtc tui --watch`, requires `openrtc[tui]` / dev deps) tails `--metrics-jsonl` from a worker in another terminal. - `pytest-cov` is in the dev dependency group; CI uses `--cov-fail-under=80`; run `uv run pytest --cov=openrtc --cov-report=xml --cov-fail-under=80` to match. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 25a521b..1c0824b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -15,8 +15,10 @@ This repository uses `uv` for local development. uv sync --group dev ``` -The dev group includes Typer and Rich so `uv run openrtc …` works without -`--extra cli`. End users install the CLI with `pip install 'openrtc[cli]'`. +The dev group includes Typer, Rich, and Textual so `uv run openrtc …` and +`uv run openrtc tui …` work without extra install flags. End users install the +CLI with `pip install 'openrtc[cli]'` and the sidecar TUI with +`pip install 'openrtc[tui]'` (or `openrtc[cli,tui]` together). If you prefer, you can also install the package and dev dependencies with pip, but `uv` is the preferred workflow for contributors. diff --git a/README.md b/README.md index 4bd4ac8..613b9bc 100644 --- a/README.md +++ b/README.md @@ -6,9 +6,12 @@
-
- A Python framework for running multiple LiveKit voice agents in a single worker process with shared prewarmed models. -
+# openrtc-python + +Run N LiveKit voice agents in one worker. Pay the model-load cost once. + +*PyPI package name: [`openrtc`](https://pypi.org/project/openrtc/).* +
@@ -17,118 +20,72 @@ Ruff PyPI version codecov + CI

-OpenRTC is designed for the common case where you want to run several different -voice agents on a small VPS without paying the memory cost of one full -LiveKit worker per agent. - -
-
Table of Contents
    -
  1. Why OpenRTC exists
  2. -
  3. What OpenRTC wraps
  4. -
  5. Memory comparison
  6. +
  7. The problem
  8. +
  9. What openrtc does
  10. Installation
  11. -
  12. Quick start: register agents directly
  13. -
  14. Quick start: discover agent files
  15. -
  16. Routing behavior
  17. +
  18. Quick start: explicit registration with add()
  19. +
  20. Quick start: one Python file per agent with discover()
  21. +
  22. Memory: before and after
  23. +
  24. Routing
  25. Greetings and session options
  26. -
  27. Provider model strings
  28. -
  29. CLI usage
  30. +
  31. Provider configuration
  32. +
  33. CLI and TUI
  34. Public API at a glance
  35. Project structure
  36. Contributing
  37. +
  38. License

-## Why OpenRTC exists - -A standard `livekit-agents` worker process loads shared runtime assets such as -Python, Silero VAD, and turn-detection models. If you run ten agents as ten -separate workers, you pay that base memory cost ten times. - -OpenRTC keeps your agent classes completely standard and only centralizes the -worker boilerplate: - -- shared prewarm for VAD and turn detection -- metadata-based dispatch to the correct agent -- per-agent `AgentSession` construction inside one worker - -Your agent code still subclasses `livekit.agents.Agent` directly. If you stop -using OpenRTC later, your agent classes still work as normal LiveKit agents. - -## What OpenRTC wraps - -OpenRTC intentionally wraps only the worker orchestration layer: - -1. `AgentServer()` setup and prewarm -2. a universal `@server.rtc_session()` entrypoint -3. per-call `AgentSession()` creation with the right providers - -OpenRTC does **not** replace: +## The problem -- `livekit.agents.Agent` -- `@function_tool` -- `RunContext` -- `on_enter`, `on_exit`, `llm_node`, `stt_node`, `tts_node` -- standard LiveKit deployment patterns +You already ship three voice agents with `livekit-agents`. Each agent is its own worker on the same VPS. Every worker process loads the same shared stack: Python runtime, Silero VAD, and the turn-detection model. You are not loading three different models. You are loading the same stack three times because the process boundary forces it. On a 1–2 GB instance, that shows up as duplicate resident set for every idle worker. You pay RAM for copies you do not need. 
-## Memory comparison +## What openrtc does -| Deployment model | Shared runtime loads | Approximate memory shape | -| --- | --- | --- | -| 10 separate LiveKit workers | 10x | ~500 MB × 10 | -| 1 OpenRTC pool with 10 agents | 1x shared + per-call session cost | ~500 MB shared + active-call overhead | - -The exact numbers depend on your providers, concurrency, and environment, but -OpenRTC is built to reduce duplicate worker overhead. +`openrtc` gives you one `AgentPool` in one worker: prewarm runs once, each incoming call still gets its own `AgentSession`, and you register multiple `Agent` subclasses on the pool so dispatch can pick one per session from metadata or fallbacks. This package does not replace your agent code. It does not sit between you and `livekit.agents.Agent`, `@function_tool`, `RunContext`, `on_enter`, `on_exit`, `llm_node`, `stt_node`, or `tts_node`. You keep your subclasses and tools as they are. You change how many workers you run, not how you write an agent. ## Installation -Install OpenRTC from PyPI: - ```bash pip install openrtc ``` -The base package pulls in `livekit-agents[openai,silero,turn-detector]`, so the -runtime plugins required by shared prewarm are installed without extra flags. - -Install the Typer/Rich CLI (`openrtc list`, `openrtc start`, `openrtc dev`) with: +The base install pulls in `livekit-agents[openai,silero,turn-detector]` so shared prewarm has the plugins it expects. ```bash pip install 'openrtc[cli]' ``` -If you are developing locally, the repository uses `uv` for environment and -command management. - -### Required environment variables - -OpenRTC uses the same environment variables as a standard LiveKit worker: +Optional Textual sidecar for live metrics: ```bash -LIVEKIT_URL=ws://localhost:7880 -LIVEKIT_API_KEY=devkey -LIVEKIT_API_SECRET=secret +pip install 'openrtc[cli,tui]' ``` -For provider-native OpenAI plugin objects, set: +Set the same variables you use for any LiveKit worker: ```bash -OPENAI_API_KEY=... 
+export LIVEKIT_URL=ws://localhost:7880 +export LIVEKIT_API_KEY=devkey +export LIVEKIT_API_SECRET=secret ``` -## Quick start: register agents directly with `add()` +For OpenAI-backed plugins, set `OPENAI_API_KEY` as you already do. -Use `AgentPool.add(...)` when you want the most explicit setup. +## Quick start: explicit registration with `add()` + +Use this when you want every agent registered in one place with explicit names and providers. ```python from livekit.agents import Agent @@ -165,15 +122,11 @@ pool.add( pool.run() ``` -## Quick start: discover agent files with `@agent_config(...)` +## Quick start: one Python file per agent with `discover()` -Use discovery when you want one agent module per file. OpenRTC will import each -module, find a local `Agent` subclass, and optionally read overrides from the -`@agent_config(...)` decorator. +Use this when you prefer one module per agent and optional `@agent_config(...)` on each class. -Discovered agents are safe to run under `livekit dev`, including spawn-based -worker runtimes such as macOS. For direct `add()` registration, define agent -classes at module scope so worker processes can reload them. +Create a directory (for example `agents/`) and add one `.py` file per agent. Then: ```python from pathlib import Path @@ -190,7 +143,7 @@ pool.discover(Path("./agents")) pool.run() ``` -Example agent file: +Example file `agents/restaurant.py`: ```python from livekit.agents import Agent @@ -203,20 +156,24 @@ class RestaurantAgent(Agent): super().__init__(instructions="You help callers make restaurant bookings.") ``` -### Discovery defaults +If a module has no `@agent_config`, the agent name defaults to the filename stem. STT, LLM, TTS, and greeting fall back to the pool defaults. + +Discovered agents work with `livekit dev` and spawn-based workers on macOS. For `add()`, define agent classes at module scope so worker reload can import them. -A discovered module does not need to provide any OpenRTC metadata. 
If the agent -class has no `@agent_config(...)` decorator: +## Memory: before and after -- the agent name defaults to the Python filename stem -- STT/LLM/TTS/greeting fall back to `AgentPool(...)` defaults +Assume an illustrative **~400 MB** idle baseline per worker for the shared stack (VAD, turn detector, and similar). Your measured RSS will differ by provider, model, and OS. -That keeps discovery straightforward while still allowing per-agent overrides -when needed. +| | Before openrtc | After openrtc | +| --- | --- | --- | +| Three workers, same stack | about **3 × 400 MB ≈ 1.2 GB** idle baseline (three loads) | — | +| One worker, three registered agents | — | about **1 × 400 MB** idle baseline (one load) plus per-session overhead | + +Exact numbers depend on your providers, concurrency, and call patterns. The win comes from loading that stack once instead of once per agent worker. -## Routing behavior +## Routing -For each incoming room, `AgentPool` resolves the agent in this order: +One process hosts several agent classes, so each session must resolve to a single registered name. `AgentPool` resolves the agent in this order: 1. `ctx.job.metadata["agent"]` 2. `ctx.job.metadata["demo"]` @@ -225,16 +182,11 @@ For each incoming room, `AgentPool` resolves the agent in this order: 5. room name prefix match, such as `restaurant-call-123` 6. the first registered agent -This lets one worker process host several agents while staying compatible with -standard LiveKit job and room metadata. - -If metadata references an unknown registered name, OpenRTC raises a `ValueError` -instead of silently falling back. +If metadata names an agent that is not registered, you get a `ValueError` instead of a silent fallback. ## Greetings and session options -OpenRTC can play a greeting after `ctx.connect()` and pass extra options into -`AgentSession(...)`. +You can pass a greeting and extra `AgentSession` options per registration.
```python pool.add( @@ -247,36 +199,25 @@ pool.add( ) ``` -Direct keyword arguments take precedence over the same keys inside -`session_kwargs`. +Direct keyword arguments win over the same keys inside `session_kwargs`. -By default, OpenRTC builds explicit `turn_handling` for each session using the -multilingual turn detector with VAD-based interruption. That keeps the shared -turn detector available without implicitly enabling LiveKit adaptive -interruption. To opt into adaptive interruption explicitly, pass -`session_kwargs={"turn_handling": {"interruption": {"mode": "adaptive"}}}`. +By default, OpenRTC sets explicit `turn_handling` with the multilingual turn detector and VAD-based interruption. To opt into adaptive interruption, pass `session_kwargs={"turn_handling": {"interruption": {"mode": "adaptive"}}}`. ## Provider configuration -OpenRTC now passes `stt`, `llm`, and `tts` through to `livekit-agents` -unchanged. - -For self-hosted and provider-native setups, pass instantiated provider objects: +Pass instantiated provider objects through to `livekit-agents` unchanged, for example: - `openai.STT(model="gpt-4o-mini-transcribe")` - `openai.responses.LLM(model="gpt-4.1-mini")` - `openai.TTS(model="gpt-4o-mini-tts")` -If you pass raw strings such as `openai/gpt-4.1-mini`, OpenRTC leaves them -unchanged and `livekit-agents` will interpret them according to the current -LiveKit runtime, which may mean inference-backed behavior on compatible cloud -deployments. +If you pass strings such as `openai/gpt-4.1-mini`, OpenRTC leaves them as-is and the LiveKit runtime interprets them for your deployment. -## CLI usage +## CLI and TUI -OpenRTC includes a CLI for discovery-based workflows. +Install `openrtc[cli]` to get `openrtc` on your PATH. Subcommands follow the LiveKit Agents CLI shape (`dev`, `start`, `console`, `connect`, `download-files`), plus `list` and `tui`. 
-### List discovered agents +**List what discovery would register** (defaults are string passthroughs for `livekit-agents`, not constructed provider objects): ```bash openrtc list \ @@ -286,59 +227,34 @@ openrtc list \ --default-tts openai/gpt-4o-mini-tts ``` -These CLI defaults are raw passthrough strings for `livekit-agents`, not -provider-object construction. - -Stable output for scripts and CI: - -- `--plain` — line-oriented text without ANSI or table borders (similar to the - legacy `print` format). Use `--resources` for source-size and RSS lines. -- `--json` — machine-readable JSON with a `schema_version` field; combine with - `--resources` for `resource_summary` (footprint + resident-set metadata). - The `resident_set` object includes a `description`: on **Linux** it reflects - current **VmRSS**; on **macOS** it is **peak** `ru_maxrss` (bytes), not - instantaneous live RSS—compare runs only on the same OS. - -### Run in production mode +**Run a production worker** (after exporting `LIVEKIT_*`): ```bash openrtc start --agents-dir ./agents -openrtc start --agents-dir ./agents --dashboard ``` -### Run in development mode +**Run a development worker**: ```bash openrtc dev --agents-dir ./agents -openrtc dev --agents-dir ./examples/agents --dashboard --metrics-json-file ./runtime.json ``` -Both `start` and `dev` discover agents first and then hand off to the underlying -LiveKit worker runtime. - -The optional runtime dashboard shows: +Optional visibility: `--dashboard` prints a Rich summary in the terminal. `--metrics-json-file ./runtime.json` overwrites a JSON snapshot on each tick. Use that for scripts, dashboards, or CI. For JSON Lines plus a separate terminal UI, use `--metrics-jsonl ./metrics.jsonl` with `openrtc tui --watch ./metrics.jsonl` after `pip install 'openrtc[cli,tui]'`. 
-- current worker RSS -- active sessions and total handled sessions -- per-agent load -- the last routed agent and latest failure -- an estimated memory-savings comparison between one shared worker and one - worker per registered agent +Stable machine output: `openrtc list --json` and `--plain`. Combine `--resources` when you want footprint hints. OpenRTC-only flags are stripped before the handoff to LiveKit’s CLI parser. -For automation, `--metrics-json-file` writes the same runtime snapshot as JSON -while the worker is running. See [docs/cli.md](docs/cli.md) for a step-by-step -"prove the value" workflow. +Full flag lists live in [docs/cli.md](docs/cli.md). ## Public API at a glance -OpenRTC currently exposes: +Everything openrtc exposes publicly is listed here. Anything else is internal and not treated as stable. - `AgentPool` - `AgentConfig` - `AgentDiscoveryConfig` - `agent_config(...)` -On `AgentPool`, the primary public methods and properties are: +On `AgentPool`: - `add(...)` - `discover(...)` @@ -347,6 +263,7 @@ On `AgentPool`, the primary public methods and properties are: - `remove(name)` - `run()` - `runtime_snapshot()` +- `drain_metrics_stream_events()` — for JSONL export paths (mainly CLI; rare in app code) - `server` ## Project structure @@ -355,17 +272,20 @@ On `AgentPool`, the primary public methods and properties are: src/openrtc/ ├── __init__.py ├── cli.py +├── cli_app.py +├── metrics_stream.py +├── tui_app.py └── pool.py ``` -- `pool.py` contains the core `AgentPool` implementation and discovery helpers -- `cli.py` provides discovery and worker startup commands -- `__init__.py` exposes the public package API +- `pool.py` — `AgentPool`, discovery, routing +- `cli.py` / `cli_app.py` — Typer/Rich CLI (`openrtc[cli]`) +- `metrics_stream.py` — JSONL metrics schema +- `tui_app.py` — optional Textual sidecar (`openrtc[tui]`) ## Contributing -Contributions are welcome. Please read [CONTRIBUTING.md](CONTRIBUTING.md) -before opening a pull request. 
+See [CONTRIBUTING.md](CONTRIBUTING.md). ## License diff --git a/codecov.yml b/codecov.yml index 9ab4c6e..42af682 100644 --- a/codecov.yml +++ b/codecov.yml @@ -3,6 +3,12 @@ # Codecov checks and PR comments. Patch status is informational so small PRs # are not blocked twice (pytest remains the hard gate for overall %). +# Optional Textual sidecar (`openrtc[tui]`). Excluded from Codecov totals/patch so +# PR checks are not dominated by UI-only lines; `pytest --cov=openrtc` still +# includes it unless you omit it locally. +ignore: + - "**/openrtc/tui_app\\.py" + coverage: precision: 2 round: down diff --git a/docs/.vitepress/config.ts b/docs/.vitepress/config.ts index be7f7ba..0d902ce 100644 --- a/docs/.vitepress/config.ts +++ b/docs/.vitepress/config.ts @@ -6,8 +6,19 @@ export default defineConfig({ base: '/openrtc-python/', cleanUrls: true, lastUpdated: true, + head: [ + ['link', { rel: 'preconnect', href: 'https://fonts.googleapis.com' }], + [ + 'link', + { + rel: 'preconnect', + href: 'https://fonts.gstatic.com', + crossorigin: '', + }, + ], + ], themeConfig: { - logo: '/logo.svg', + logo: '/logo.png', nav: [ { text: 'Guide', link: '/getting-started' }, { text: 'Concepts', link: '/concepts/architecture' }, diff --git a/docs/.vitepress/theme/custom.css b/docs/.vitepress/theme/custom.css new file mode 100644 index 0000000..3ceb214 --- /dev/null +++ b/docs/.vitepress/theme/custom.css @@ -0,0 +1,171 @@ +/** + * OpenRTC docs theme — aligned with brand assets (cyan / deep blue, modern sans). 
+ */ + +@import 'https://fonts.googleapis.com/css2?family=DM+Sans:ital,opsz,wght@0,9..40,400..700;1,9..40,500..700&family=JetBrains+Mono:wght@400;500&display=swap'; + +:root { + --ort-font-sans: 'DM Sans', ui-sans-serif, system-ui, sans-serif, + 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol', 'Noto Color Emoji'; + --ort-font-mono: 'JetBrains Mono', ui-monospace, 'Menlo', 'Monaco', 'Consolas', + monospace; + + /* Light: cool neutrals + readable cyan accents */ + --vp-c-brand-1: #006f94; + --vp-c-brand-2: #0088b8; + --vp-c-brand-3: #00a3d9; + --vp-c-brand-soft: rgba(0, 163, 217, 0.14); + + --vp-c-bg: #f5fafc; + --vp-c-bg-alt: #e8f3f8; + --vp-c-bg-elv: #ffffff; + --vp-c-bg-soft: #e2eef5; + + --vp-c-divider: #d4e4ed; + --vp-c-gutter: #cfe0ea; + + --vp-font-family-base: var(--ort-font-sans); + --vp-font-family-mono: var(--ort-font-mono); + + --vp-home-hero-image-background-image: radial-gradient( + circle at 30% 30%, + rgba(0, 196, 255, 0.35) 0%, + rgba(0, 120, 180, 0.12) 45%, + transparent 72% + ); + + --vp-nav-logo-height: 34px; +} + +.dark { + /* Dark: deep navy canvas, electric cyan accents (banner / logo) */ + --vp-c-brand-1: #5ae4ff; + --vp-c-brand-2: #24d4ff; + --vp-c-brand-3: #00b8e6; + --vp-c-brand-soft: rgba(0, 220, 255, 0.14); + + --vp-c-bg: #070b12; + --vp-c-bg-alt: #0c121f; + --vp-c-bg-elv: #111a2c; + --vp-c-bg-soft: #152033; + + --vp-c-border: #2a3a4f; + --vp-c-divider: #1c2838; + --vp-c-gutter: #0a0e16; + + --vp-c-text-1: #e8f4f8; + --vp-c-text-2: #9db4c4; + --vp-c-text-3: #6b8294; + + --vp-home-hero-image-background-image: radial-gradient( + circle at 28% 28%, + rgba(0, 220, 255, 0.45) 0%, + rgba(0, 100, 160, 0.2) 42%, + transparent 70% + ); + + --vp-code-block-bg: #0c121f; + --vp-code-bg: rgba(0, 220, 255, 0.08); +} + +/* Home: subtle ambient glow behind hero */ +.VPHome { + background: radial-gradient( + ellipse 120% 70% at 50% -15%, + rgba(0, 180, 230, 0.09), + transparent 52% + ); +} + +.dark .VPHome { + background: radial-gradient( + 
ellipse 100% 55% at 50% 0%, + rgba(0, 200, 255, 0.14), + transparent 58% + ); +} + +/* Echo logo typography: italic, tight tracking on product name */ +.VPHero .heading .name { + font-style: italic; + letter-spacing: -0.03em; +} + +/* Gradient fill on hero title (clip) */ +.VPHero .heading .name.clip { + background: linear-gradient(118deg, #00c6ff 0%, #0090c9 42%, #005a7a 100%); + -webkit-background-clip: text; + background-clip: text; + -webkit-text-fill-color: transparent !important; + color: transparent !important; +} + +.dark .VPHero .heading .name.clip { + background: linear-gradient(118deg, #8aebff 0%, #3bdcff 38%, #00a8d4 100%); + -webkit-background-clip: text; + background-clip: text; + -webkit-text-fill-color: transparent !important; + color: transparent !important; +} + +/* Second line stays solid for readability */ +.VPHero .heading .text { + color: var(--vp-c-text-1); +} + +/* Landscape banner: use more horizontal room on large screens */ +@media (min-width: 960px) { + .VPHome .VPHero.has-image .image-container { + width: 100%; + max-width: 440px; + min-height: 260px; + height: auto; + } + + .VPHome .VPHero.has-image .image-src { + position: relative; + top: auto; + left: auto; + transform: none; + margin: 0 auto; + display: block; + max-width: 100%; + max-height: min(280px, 40vh); + width: auto; + height: auto; + object-fit: contain; + } + + .VPHome .VPHero.has-image .image-bg { + width: 280px; + height: 280px; + } +} + +/* Nav logo: crisp PNG scaling */ +.VPNavBarTitle img { + object-fit: contain; +} + +/* Top bar: light separation without heavy chrome */ +.VPNav { + border-bottom: 1px solid var(--vp-c-divider); + background-color: color-mix(in srgb, var(--vp-c-bg) 88%, transparent); + backdrop-filter: blur(10px); +} + +@supports not (backdrop-filter: blur(10px)) { + .VPNav { + background-color: var(--vp-c-bg); + } +} + +/* Sidebar: slight depth */ +.VPSidebar { + background-color: var(--vp-c-bg-alt); +} + +/* Doc links: clearer hover */ +.vp-doc 
a:hover { + text-decoration-thickness: 2px; +} diff --git a/docs/.vitepress/theme/index.ts b/docs/.vitepress/theme/index.ts new file mode 100644 index 0000000..370438e --- /dev/null +++ b/docs/.vitepress/theme/index.ts @@ -0,0 +1,7 @@ +import type { Theme } from 'vitepress' +import DefaultTheme from 'vitepress/theme' +import './custom.css' + +export default { + extends: DefaultTheme, +} satisfies Theme diff --git a/docs/api/pool.md b/docs/api/pool.md index 24c3f41..8bf1e6f 100644 --- a/docs/api/pool.md +++ b/docs/api/pool.md @@ -211,7 +211,8 @@ snapshot = pool.runtime_snapshot() ``` Returns a typed runtime snapshot for the current shared worker. The snapshot is -used by the CLI dashboard and `--metrics-json-file` output and includes: +used by the CLI dashboard, `--metrics-json-file`, and `kind: "snapshot"` lines +in `--metrics-jsonl` output. It includes: - resident memory metadata - registered and active session counts @@ -221,6 +222,16 @@ used by the CLI dashboard and `--metrics-json-file` output and includes: - last routed agent - a best-effort shared-worker savings estimate +## `drain_metrics_stream_events()` + +```python +events = pool.drain_metrics_stream_events() +``` + +Removes and returns queued **session lifecycle** records for JSONL export +(`session_started`, `session_finished`, `session_failed`). The OpenRTC CLI calls +this when writing `--metrics-jsonl`; most applications can ignore it. 
+ ## Routing behavior `AgentPool` resolves the active agent in this order: diff --git a/docs/cli.md b/docs/cli.md index 92ba1c1..5698260 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -7,22 +7,68 @@ programmatic entry is `typer.main.get_command(app).main(...)` (Click’s ## Installation -The **library** ( `AgentPool`, discovery, routing ) installs with: +The **library** (`AgentPool`, discovery, routing) installs with: ```bash pip install openrtc ``` -The **CLI stack** (Typer, Rich) is declared as the optional extra `cli`: +The **CLI stack** (Typer, Rich) is the optional extra `cli`: ```bash pip install 'openrtc[cli]' ``` +The **sidecar TUI** (Textual) is a separate optional extra: + +```bash +pip install 'openrtc[cli,tui]' +``` + If Typer/Rich are not importable, `openrtc.cli:main` exits with `1` and prints -an install hint. In practice, `livekit-agents` may pull Typer transitively, so -the hint path is mainly covered by tests and edge environments—see -`tests/test_cli_optional_extra_integration.py` in the repo. +an install hint. If Textual is missing, `openrtc tui` logs an error and exits +with `1`. + +## Typical usage + +1. Set the same variables as a standard LiveKit worker: + + ```bash + export LIVEKIT_URL=ws://localhost:7880 + export LIVEKIT_API_KEY=devkey + export LIVEKIT_API_SECRET=secret + ``` + +2. Run a worker subcommand with **only** `--agents-dir` (plus any provider + defaults your agents need): + + ```bash + openrtc dev --agents-dir ./agents + # or + openrtc start --agents-dir ./agents + ``` + +Defaults are conservative: **no** Rich dashboard unless you pass `--dashboard`, +and **no** metrics files unless you pass `--metrics-json-file` or +`--metrics-jsonl`. Refresh intervals default to **1 second** where applicable. + +Subcommands mirror the LiveKit Agents CLI (`python agent.py dev`, `start`, +`console`, `connect`, `download-files`). OpenRTC adds **`--agents-dir`** for +discovery, then delegates to `livekit.agents.cli.run_app`. 
OpenRTC-only flags are +stripped from `sys.argv` before that handoff so LiveKit does not see options +like `--agents-dir`. + +## Connection overrides + +You can override `LIVEKIT_*` per invocation: + +- `--url` +- `--api-key` +- `--api-secret` +- `--log-level` (also `LIVEKIT_LOG_LEVEL`) + +These appear under **Connection** or **Advanced** in `--help` depending on the +flag. ## Commands @@ -37,6 +83,8 @@ Discovers agent modules and prints each agent’s resolved settings. when the shape changes) and `command: "list"`. Combine with `--resources` for `resource_summary`. - **`--plain` and `--json` together** are rejected (non-zero exit). +- **`--resources`** — Footprint and memory hints (grouped under **Advanced** in + `--help`). ```bash openrtc list --agents-dir ./agents @@ -46,40 +94,88 @@ openrtc list --agents-dir ./agents --json ### `openrtc start` -Discovers agent modules and starts the LiveKit worker in production mode. +Production-style worker (same role as `python agent.py start`). ```bash openrtc start --agents-dir ./agents ``` -Optional runtime visibility: - -- **`--dashboard`** — Show a live Rich dashboard with worker RSS, active - sessions, failures, and an estimated “separate workers vs shared worker” - savings comparison. -- **`--dashboard-refresh 1.0`** — Control how often the dashboard refreshes. -- **`--metrics-json-file ./openrtc-runtime.json`** — Write a live JSON snapshot - for automation and host-side tooling. - ### `openrtc dev` -Discovers agent modules and starts the LiveKit worker in development mode. +Development worker with reload (same role as `python agent.py dev`). ```bash openrtc dev --agents-dir ./agents -openrtc dev --agents-dir ./examples/agents --dashboard -openrtc dev --agents-dir ./examples/agents --dashboard --metrics-json-file ./runtime.json ``` -## Shared default options +### `openrtc console` + +Local console session (same role as `python agent.py console`). 
+ +```bash +openrtc console --agents-dir ./agents +``` + +### `openrtc connect` + +Connect the worker to an existing room (LiveKit `connect`). Requires +`--room`. -Each command accepts these optional defaults, which are applied when a +```bash +openrtc connect --agents-dir ./agents --room my-room +``` + +### `openrtc download-files` + +Download plugin assets (LiveKit `download-files`). Only needs the agents +directory (for a valid worker entrypoint) plus connection settings—**no** +`--default-stt` / `--default-llm` / `--default-tts` / `--default-greeting`. + +```bash +openrtc download-files --agents-dir ./agents +``` + +### `openrtc tui` + +Sidecar Textual UI that tails a **JSON Lines** metrics file written by the +worker (`--metrics-jsonl`). Requires `openrtc[tui]`. + +```bash +# Terminal 1 +openrtc dev --agents-dir ./agents --metrics-jsonl ./openrtc-metrics.jsonl + +# Terminal 2 +openrtc tui --watch ./openrtc-metrics.jsonl +``` + +Use **`--from-start`** (under **Advanced**) to read the file from the beginning +instead of tailing from EOF. + +## Runtime visibility and automation + +- **`--dashboard`** — Live Rich summary (RSS, sessions, routing, savings + estimate). Off by default. +- **`--metrics-json-file PATH`** — Overwrites a JSON file each tick with the + latest `PoolRuntimeSnapshot` (good for scripts). Grouped under **Advanced**. +- **`--metrics-jsonl PATH`** — Appends **versioned JSON Lines** (truncates when + the worker starts). Each line is one record: `schema_version`, `kind` + (`snapshot` or `event`), `seq`, `wall_time_unix`, `payload`. Snapshots match + `PoolRuntimeSnapshot.to_dict()`; events carry session lifecycle hints + (`session_started`, `session_finished`, `session_failed`). Intended for + `openrtc tui --watch` and other tail consumers. +- **`--dashboard-refresh`** — Interval in seconds for dashboard, metrics file, + and JSONL when `--metrics-jsonl-interval` is not set (**Advanced**). 
+- **`--metrics-jsonl-interval`** — Override JSONL cadence only (**Advanced**). + +## Shared default options (discovery) + +Worker commands that load agents accept optional defaults applied when a discovered agent does not override them via `@agent_config(...)`: - `--default-stt` - `--default-llm` - `--default-tts` -- `--default-greeting` +- `--default-greeting` (**Advanced**) Example: @@ -88,12 +184,11 @@ openrtc list \ --agents-dir ./examples/agents \ --default-stt openai/gpt-4o-mini-transcribe \ --default-llm openai/gpt-4.1-mini \ - --default-tts openai/gpt-4o-mini-tts \ - --default-greeting "Hello from OpenRTC." + --default-tts openai/gpt-4o-mini-tts ``` -These defaults are passed through to `livekit-agents` as raw strings. If you -need provider-native plugin objects, configure them in Python with `AgentPool` +These defaults are passed through to `livekit-agents` as raw strings. For +provider-native plugin objects, configure them in Python with `AgentPool` instead of through the CLI flags. ## `list --resources` (footprint) @@ -106,12 +201,7 @@ With **`--resources`**, `list` adds: from `openrtc.resources` (Linux: current VmRSS; macOS: peak `ru_maxrss`, not live RSS—see `resident_set.description` in `--json` output). - **Savings estimate** — a transparent estimate of the memory saved by one - shared worker versus one worker per registered agent. The estimate is based on - the current shared-worker baseline and is meant as an explanatory comparison, - not an orchestrator-level billing metric. - -Use this for **rough** local comparisons (single worker vs many images). For -production, rely on host or container metrics. + shared worker versus one worker per registered agent. ```bash openrtc list --agents-dir ./examples/agents --resources @@ -122,23 +212,20 @@ openrtc list --agents-dir ./examples/agents --resources --json - `--agents-dir` is required for every command. - `list` returns a non-zero exit code when no discoverable agents are found. 
-- `start` and `dev` both discover agents before handing off to the underlying - LiveKit worker runtime. -- The live dashboard and `--metrics-json-file` use runtime snapshots from the - running shared worker, unlike `list --resources`, which reports only on the +- Worker commands discover agents before handing off to the LiveKit CLI. +- The live dashboard, `--metrics-json-file`, and `--metrics-jsonl` reflect the + **running** shared worker, unlike `list --resources`, which reflects the short-lived CLI discovery process. ## Prove the shared-worker value locally -One practical workflow is: - 1. Discover your agents: ```bash openrtc list --agents-dir ./examples/agents --resources ``` -2. Start one shared worker with the dashboard enabled: +2. Start one shared worker with the dashboard and/or metrics output: ```bash openrtc dev \ @@ -147,14 +234,18 @@ One practical workflow is: --metrics-json-file ./runtime.json ``` -3. Watch the dashboard for: - - **Worker RSS** — current shared-worker memory - - **Active sessions** — how much load the single worker is handling - - **Estimated saved** — the gap between one shared worker and the “one worker - per agent” baseline - - **Per-agent sessions** — which agents are actively consuming capacity + Or enable JSONL for a sidecar TUI: + + ```bash + openrtc dev \ + --agents-dir ./examples/agents \ + --metrics-jsonl ./openrtc-metrics.jsonl + ``` + +3. Watch the dashboard (or `openrtc tui --watch ./openrtc-metrics.jsonl`) for + worker RSS, active sessions, routing, and errors. -4. Use `runtime.json` for automation, shell scripts, or container-side scraping. +4. Use `runtime.json` or the JSONL stream for automation or scraping. -For production capacity planning, compare these OpenRTC runtime snapshots with -host or container telemetry from your deployment platform. +For production capacity planning, compare these snapshots with host or container +telemetry from your deployment platform. 
diff --git a/docs/getting-started.md b/docs/getting-started.md index 5762af0..b07c8ab 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -14,15 +14,21 @@ pip install openrtc The base package includes the LiveKit Silero and turn-detector plugins used by OpenRTC's shared prewarm path. -Install the **Typer/Rich CLI** (`openrtc list`, `openrtc start`, `openrtc dev`) -with: +Install the **Typer/Rich CLI** (`openrtc list`, `openrtc start`, `openrtc dev`, +`openrtc console`, …) with: ```bash pip install 'openrtc[cli]' ``` -See [CLI](./cli) for output modes (`--plain`, `--json`, `--resources`) and -optional-dependency behavior. +Install the optional **Textual sidecar** for `openrtc tui --watch` with: + +```bash +pip install 'openrtc[cli,tui]' +``` + +See [CLI](./cli) for subcommands, output modes (`--plain`, `--json`, `--resources`), +the JSONL metrics stream (`--metrics-jsonl`), and optional-dependency behavior. If you are contributing locally, install the package in editable mode: @@ -31,7 +37,19 @@ python -m pip install -e . ``` Contributor environments typically use `uv sync --group dev`, which includes -Typer and Rich so the CLI runs without passing `--extra cli`. +Typer, Rich, and Textual so `openrtc` and `openrtc tui` run without extra flags. + +## CLI quick path + +With `LIVEKIT_URL`, `LIVEKIT_API_KEY`, and `LIVEKIT_API_SECRET` set, the minimal +worker invocation is: + +```bash +openrtc dev --agents-dir ./agents +``` + +Use `openrtc start` for production-style runs. See [CLI](./cli) for `console`, +`connect`, `download-files`, metrics files, and the sidecar TUI. ## Quick start diff --git a/docs/index.md b/docs/index.md index 18c7949..772fd2f 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,25 +1,32 @@ -# OpenRTC - -OpenRTC is a Python package for running multiple LiveKit voice agents in a -single worker process with shared prewarmed runtime dependencies. - -## Why OpenRTC? - -- **Multi-agent routing** from a single worker process. 
-- **Shared prewarm** for VAD and turn detection models. -- **Explicit registration** through a small programmatic API. -- **LiveKit-native runtime** built on `livekit-agents`. - -## What you can do today - -The current package is intentionally small and focused: - -- register one or more LiveKit `Agent` subclasses with `AgentPool` -- select an agent using room or job metadata -- share runtime dependencies across sessions in one worker process -- start a LiveKit worker using the registered pool -- use the optional CLI (`pip install 'openrtc[cli]'`) for discovery, stable - `--json` / `--plain` output, and local `--resources` footprint hints +--- +layout: home +outline: false + +hero: + name: OpenRTC + text: Shared worker voice agents + tagline: Register multiple LiveKit agents in one process, route by metadata, and prewarm models once. + image: + src: /banner.png + alt: OpenRTC banner + actions: + - theme: brand + text: Get started + link: /getting-started + - theme: alt + text: CLI reference + link: /cli + +features: + - title: Multi-agent routing + details: Dispatch the right Agent implementation from a single worker using room or job metadata. + - title: Shared prewarm + details: Load VAD, turn detection, and other heavy dependencies once for every session in the pool. + - title: LiveKit-native runtime + details: Built on livekit-agents with familiar dev, start, console, and connect-style workflows. + - title: CLI and observability + details: Optional openrtc CLI with JSON output, resource hints, JSONL metrics, and a Textual sidecar TUI. 
+--- ## Read the docs diff --git a/docs/public/banner.png b/docs/public/banner.png new file mode 100644 index 0000000..e102acd Binary files /dev/null and b/docs/public/banner.png differ diff --git a/docs/public/logo.png b/docs/public/logo.png new file mode 100644 index 0000000..85345be Binary files /dev/null and b/docs/public/logo.png differ diff --git a/pyproject.toml b/pyproject.toml index 8cd0b6a..468c1be 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,6 +30,11 @@ cli = [ "rich>=13", "typer>=0.12", ] +tui = [ + "rich>=13", + "typer>=0.12", + "textual>=0.47,<2", +] [project.urls] Homepage = "https://github.com/mahimailabs/openrtc" @@ -74,6 +79,9 @@ ignore = [ "W191", ] +[tool.ruff.lint.per-file-ignores] +"tests/conftest.py" = ["E402"] + [tool.mypy] python_version = "3.10" warn_return_any = true @@ -95,6 +103,7 @@ dev = [ "python-dotenv>=1.2.2", "rich>=13", "ruff>=0.15.6", + "textual>=0.47,<2", "typer>=0.12", ] diff --git a/src/openrtc/cli_app.py b/src/openrtc/cli_app.py index ffc6c1a..2fb662f 100644 --- a/src/openrtc/cli_app.py +++ b/src/openrtc/cli_app.py @@ -1,9 +1,13 @@ from __future__ import annotations +import contextlib import json import logging +import os import sys import threading +import time +from collections.abc import Iterator from pathlib import Path from typing import Annotated, Any @@ -14,7 +18,9 @@ from rich.progress_bar import ProgressBar from rich.table import Table from rich.text import Text +from typer import Context +from openrtc.metrics_stream import JsonlMetricsSink from openrtc.pool import AgentConfig, AgentPool from openrtc.resources import ( PoolRuntimeSnapshot, @@ -27,9 +33,28 @@ logger = logging.getLogger("openrtc") +PANEL_OPENRTC = "OpenRTC" +PANEL_LIVEKIT = "Connection" +PANEL_ADVANCED = "Advanced" + +_QUICKSTART_EPILOG = ( + "[bold]Typical usage[/bold]: set [code]LIVEKIT_URL[/code], [code]LIVEKIT_API_KEY[/code], " + "and [code]LIVEKIT_API_SECRET[/code], then run " + "[code]openrtc dev --agents-dir PATH[/code] (or 
[code]start[/code] in production). " + "Defaults are conservative (e.g. no dashboard, 1s refresh); tuning flags are under " + "the [bold]Advanced[/bold] group in each command's [code]--help[/code]." +) + app = typer.Typer( name="openrtc", - help="Discover and run multiple LiveKit agents in one worker.", + help=( + "Run multiple LiveKit voice agents from one shared worker. Commands match " + "LiveKit Agents ([code]dev[/code], [code]start[/code], [code]console[/code], " + "[code]connect[/code], [code]download-files[/code]) plus [code]list[/code] and " + "[code]tui[/code]. Only [code]--agents-dir[/code] is required for worker commands; " + "credentials use [code]LIVEKIT_*[/code] env vars by default (CLI flags optional)." + ), + epilog=_QUICKSTART_EPILOG, pretty_exceptions_show_locals=False, rich_markup_mode="rich", no_args_is_help=True, @@ -39,7 +64,7 @@ class RuntimeReporter: - """Background reporter that renders a Rich dashboard and/or JSON snapshots.""" + """Background reporter: Rich dashboard, static JSON file, and/or JSONL stream.""" def __init__( self, @@ -48,17 +73,33 @@ def __init__( dashboard: bool, refresh_seconds: float, json_output_path: Path | None, + metrics_jsonl_path: Path | None = None, + metrics_jsonl_interval: float | None = None, ) -> None: self._pool = pool self._dashboard = dashboard self._refresh_seconds = max(refresh_seconds, 0.25) self._json_output_path = json_output_path + self._jsonl_interval = ( + max(metrics_jsonl_interval, 0.25) + if metrics_jsonl_interval is not None + else self._refresh_seconds + ) + self._jsonl_sink: JsonlMetricsSink | None = None + if metrics_jsonl_path is not None: + self._jsonl_sink = JsonlMetricsSink(metrics_jsonl_path) + self._jsonl_sink.open() self._stop_event = threading.Event() self._thread: threading.Thread | None = None + self._needs_periodic_file_or_ui = dashboard or json_output_path is not None def start(self) -> None: """Start the background reporter when at least one output is enabled.""" - if not 
self._dashboard and self._json_output_path is None: + if ( + not self._dashboard + and self._json_output_path is None + and self._jsonl_sink is None + ): return self._thread = threading.Thread( target=self._run, @@ -73,8 +114,43 @@ def stop(self) -> None: if self._thread is not None: self._thread.join(timeout=max(self._refresh_seconds * 2, 1.0)) self._write_json_snapshot() + self._emit_jsonl() + if self._jsonl_sink is not None: + self._jsonl_sink.close() def _run(self) -> None: + now = time.monotonic() + next_periodic = ( + now + self._refresh_seconds + if self._needs_periodic_file_or_ui + else float("inf") + ) + next_jsonl = now + self._jsonl_interval if self._jsonl_sink else float("inf") + + def schedule_cycle(live: Live | None) -> bool: + """Wait until the next tick; run JSON/JSONL/dashboard work. Return False to exit.""" + nonlocal next_periodic, next_jsonl + n = time.monotonic() + wait_periodic = max(0.0, next_periodic - n) + wait_jsonl = ( + max(0.0, next_jsonl - n) + if self._jsonl_sink is not None + else float("inf") + ) + timeout = min(wait_periodic, wait_jsonl, 3600.0) + if self._stop_event.wait(timeout): + return False + n = time.monotonic() + if self._needs_periodic_file_or_ui and n >= next_periodic: + if live is not None: + live.update(self._build_dashboard_renderable()) + self._write_json_snapshot() + next_periodic += self._refresh_seconds + if self._jsonl_sink is not None and n >= next_jsonl: + self._emit_jsonl() + next_jsonl += self._jsonl_interval + return True + if self._dashboard: with Live( self._build_dashboard_renderable(), @@ -82,14 +158,13 @@ def _run(self) -> None: refresh_per_second=max(int(round(1 / self._refresh_seconds)), 1), transient=True, ) as live: - while not self._stop_event.wait(self._refresh_seconds): - live.update(self._build_dashboard_renderable()) - self._write_json_snapshot() + while schedule_cycle(live): + pass live.update(self._build_dashboard_renderable()) return - while not self._stop_event.wait(self._refresh_seconds): - 
self._write_json_snapshot() + while schedule_cycle(None): + pass def _build_dashboard_renderable(self) -> Panel: snapshot = self._pool.runtime_snapshot() @@ -105,6 +180,14 @@ def _write_json_snapshot(self) -> None: encoding="utf-8", ) + def _emit_jsonl(self) -> None: + """Write one snapshot line then any queued session events (same tick).""" + if self._jsonl_sink is None: + return + self._jsonl_sink.write_snapshot(self._pool.runtime_snapshot()) + for ev in self._pool.drain_metrics_stream_events(): + self._jsonl_sink.write_event(ev) + def _format_percent(saved_bytes: int | None, baseline_bytes: int | None) -> str: if saved_bytes is None or baseline_bytes in (None, 0): @@ -251,19 +334,208 @@ def _pool_kwargs( } +_OPENRTC_ONLY_FLAGS_WITH_VALUE: frozenset[str] = frozenset( + { + "--agents-dir", + "--default-stt", + "--default-llm", + "--default-tts", + "--default-greeting", + "--dashboard-refresh", + "--metrics-json-file", + "--metrics-jsonl", + "--metrics-jsonl-interval", + } +) +_OPENRTC_ONLY_BOOL_FLAGS: frozenset[str] = frozenset({"--dashboard"}) + + +def _strip_openrtc_only_flags_for_livekit(argv_tail: list[str]) -> list[str]: + """Drop OpenRTC-only CLI flags; LiveKit's ``run_app`` parses ``sys.argv`` itself. + + ``openrtc start`` / ``openrtc dev`` are implemented with Typer, then delegate to + :func:`livekit.agents.cli.run_app`, which builds a separate Typer application + that does not recognize OpenRTC options such as ``--agents-dir``. Those must + be removed before the handoff while preserving any forwarded LiveKit flags + (e.g. ``--reload``, ``--url``) when we add pass-through options later. + + For flags in ``_OPENRTC_ONLY_FLAGS_WITH_VALUE``, the **next** token is always + consumed as the value when present, even if it starts with ``--`` (e.g. a + path or provider string must not be mistaken for a following flag). 
+ """ + out: list[str] = [] + i = 0 + while i < len(argv_tail): + arg = argv_tail[i] + if arg == "--": + out.extend(argv_tail[i:]) + break + if "=" in arg: + name = arg.split("=", 1)[0] + if ( + name in _OPENRTC_ONLY_FLAGS_WITH_VALUE + or name in _OPENRTC_ONLY_BOOL_FLAGS + ): + i += 1 + continue + out.append(arg) + i += 1 + continue + if arg in _OPENRTC_ONLY_BOOL_FLAGS: + i += 1 + continue + if arg in _OPENRTC_ONLY_FLAGS_WITH_VALUE: + i += 1 + if i < len(argv_tail): + i += 1 + continue + out.append(arg) + i += 1 + return out + + def _livekit_sys_argv(subcommand: str) -> None: - """Set ``sys.argv`` for ``livekit.agents.cli.run_app`` without dropping user flags. + """Set ``sys.argv`` for ``livekit.agents.cli.run_app``. + + OpenRTC-specific options are stripped because the LiveKit CLI re-parses + ``sys.argv`` and only accepts its own flags per subcommand. When the process was not started as ``openrtc ...`` (e.g. tests that patch ``sys.argv``), only ``[argv0, subcommand]`` is used. """ prog = sys.argv[0] if len(sys.argv) >= 2 and sys.argv[1] == subcommand: - sys.argv = [prog, subcommand, *sys.argv[2:]] + rest = _strip_openrtc_only_flags_for_livekit(list(sys.argv[2:])) + sys.argv = [prog, subcommand, *rest] else: sys.argv = [prog, subcommand] +_LIVEKIT_ENV_OVERRIDE_KEYS: tuple[str, ...] 
= ( + "LIVEKIT_URL", + "LIVEKIT_API_KEY", + "LIVEKIT_API_SECRET", + "LIVEKIT_LOG_LEVEL", +) + + +def _snapshot_livekit_env() -> dict[str, str | None]: + return {key: os.environ.get(key) for key in _LIVEKIT_ENV_OVERRIDE_KEYS} + + +def _restore_livekit_env(snapshot: dict[str, str | None]) -> None: + for key, previous in snapshot.items(): + if previous is None: + os.environ.pop(key, None) + else: + os.environ[key] = previous + + +@contextlib.contextmanager +def _livekit_env_overrides( + *, + url: str | None, + api_key: str | None, + api_secret: str | None, + log_level: str | None, +) -> Iterator[None]: + """Temporarily set LiveKit env vars; restore previous values on exit.""" + snapshot = _snapshot_livekit_env() + try: + if url is not None: + os.environ["LIVEKIT_URL"] = url + if api_key is not None: + os.environ["LIVEKIT_API_KEY"] = api_key + if api_secret is not None: + os.environ["LIVEKIT_API_SECRET"] = api_secret + if log_level is not None: + os.environ["LIVEKIT_LOG_LEVEL"] = log_level + yield + finally: + _restore_livekit_env(snapshot) + + +def _delegate_discovered_pool_to_livekit( + *, + agents_dir: Path, + subcommand: str, + default_stt: str | None, + default_llm: str | None, + default_tts: str | None, + default_greeting: str | None, + dashboard: bool, + dashboard_refresh: float, + metrics_json_file: Path | None, + metrics_jsonl: Path | None, + metrics_jsonl_interval: float | None, + url: str | None, + api_key: str | None, + api_secret: str | None, + log_level: str | None, +) -> None: + """Discover agents, optionally set connection env, then run a LiveKit CLI subcommand.""" + pool = AgentPool( + **_pool_kwargs(default_stt, default_llm, default_tts, default_greeting) + ) + _discover_or_exit(agents_dir, pool) + with _livekit_env_overrides( + url=url, api_key=api_key, api_secret=api_secret, log_level=log_level + ): + _livekit_sys_argv(subcommand) + _run_pool_with_reporting( + pool, + dashboard=dashboard, + dashboard_refresh=dashboard_refresh, + 
metrics_json_file=metrics_json_file, + metrics_jsonl=metrics_jsonl, + metrics_jsonl_interval=metrics_jsonl_interval, + ) + + +def _run_connect_handoff( + *, + agents_dir: Path, + default_stt: str | None, + default_llm: str | None, + default_tts: str | None, + default_greeting: str | None, + room: str, + participant_identity: str | None, + log_level: str | None, + url: str | None, + api_key: str | None, + api_secret: str | None, + dashboard: bool, + dashboard_refresh: float, + metrics_json_file: Path | None, + metrics_jsonl: Path | None, + metrics_jsonl_interval: float | None, +) -> None: + """Hand off to LiveKit ``connect`` with explicit argv (Typer consumes flags first).""" + pool = AgentPool( + **_pool_kwargs(default_stt, default_llm, default_tts, default_greeting) + ) + _discover_or_exit(agents_dir, pool) + with _livekit_env_overrides( + url=url, api_key=api_key, api_secret=api_secret, log_level=None + ): + prog = sys.argv[0] + tail: list[str] = ["connect", "--room", room] + if participant_identity is not None: + tail.extend(["--participant-identity", participant_identity]) + if log_level is not None: + tail.extend(["--log-level", log_level]) + sys.argv = [prog, *tail] + _run_pool_with_reporting( + pool, + dashboard=dashboard, + dashboard_refresh=dashboard_refresh, + metrics_json_file=metrics_json_file, + metrics_jsonl=metrics_jsonl, + metrics_jsonl_interval=metrics_jsonl_interval, + ) + + def _discover_or_exit(agents_dir: Path, pool: AgentPool) -> list[AgentConfig]: try: discovered = pool.discover(agents_dir) @@ -302,10 +574,11 @@ def _truncate_cell(text: str, max_len: int = 36) -> str: Path, typer.Option( "--agents-dir", - help="Directory containing discoverable agent modules.", + help="Directory of agent modules to load (only required flag for most workflows).", exists=False, resolve_path=True, path_type=Path, + rich_help_panel=PANEL_OPENRTC, ), ] @@ -317,6 +590,7 @@ def _truncate_cell(text: str, max_len: int = 36) -> str: "Default STT provider used when a 
discovered agent does not " "override STT via @agent_config(...)." ), + rich_help_panel=PANEL_OPENRTC, ), ] @@ -328,6 +602,7 @@ def _truncate_cell(text: str, max_len: int = 36) -> str: "Default LLM provider used when a discovered agent does not " "override LLM via @agent_config(...)." ), + rich_help_panel=PANEL_OPENRTC, ), ] @@ -339,6 +614,7 @@ def _truncate_cell(text: str, max_len: int = 36) -> str: "Default TTS provider used when a discovered agent does not " "override TTS via @agent_config(...)." ), + rich_help_panel=PANEL_OPENRTC, ), ] @@ -350,6 +626,7 @@ def _truncate_cell(text: str, max_len: int = 36) -> str: "Default greeting used when a discovered agent does not " "override greeting via @agent_config(...)." ), + rich_help_panel=PANEL_ADVANCED, ), ] @@ -357,7 +634,8 @@ def _truncate_cell(text: str, max_len: int = 36) -> str: bool, typer.Option( "--dashboard", - help="Show a live Rich dashboard with worker memory and active sessions.", + help="Show a live Rich dashboard (off by default; use for local debugging).", + rich_help_panel=PANEL_OPENRTC, ), ] @@ -366,7 +644,8 @@ def _truncate_cell(text: str, max_len: int = 36) -> str: typer.Option( "--dashboard-refresh", min=0.25, - help="Refresh interval in seconds for the runtime dashboard and metrics file.", + help="Refresh interval in seconds for dashboard / metrics file / JSONL (default 1s).", + rich_help_panel=PANEL_ADVANCED, ), ] @@ -374,9 +653,113 @@ def _truncate_cell(text: str, max_len: int = 36) -> str: Path | None, typer.Option( "--metrics-json-file", - help="Write live runtime metrics snapshots to this JSON file for automation.", + help="Overwrite a JSON file each tick with the latest snapshot (automation / CI).", + resolve_path=True, + path_type=Path, + rich_help_panel=PANEL_ADVANCED, + ), +] + +MetricsJsonlArg = Annotated[ + Path | None, + typer.Option( + "--metrics-jsonl", + help=( + "Append JSON Lines for ``openrtc tui --watch`` (off by default; " + "truncates when the worker starts)." 
+ ), + resolve_path=True, + path_type=Path, + rich_help_panel=PANEL_OPENRTC, + ), +] + +MetricsJsonlIntervalArg = Annotated[ + float | None, + typer.Option( + "--metrics-jsonl-interval", + min=0.25, + help=("Seconds between JSONL records (default: same as --dashboard-refresh)."), + rich_help_panel=PANEL_ADVANCED, + ), +] + +TuiWatchPathArg = Annotated[ + Path, + typer.Option( + "--watch", + help="JSONL file written by the worker's --metrics-jsonl.", resolve_path=True, path_type=Path, + rich_help_panel=PANEL_OPENRTC, + ), +] + +TuiFromStartArg = Annotated[ + bool, + typer.Option( + "--from-start", + help="Read the file from the beginning instead of tailing from EOF.", + rich_help_panel=PANEL_ADVANCED, + ), +] + +LiveKitUrlArg = Annotated[ + str | None, + typer.Option( + "--url", + help="WebSocket URL of the LiveKit server or Cloud project.", + envvar="LIVEKIT_URL", + rich_help_panel=PANEL_LIVEKIT, + ), +] + +LiveKitApiKeyArg = Annotated[ + str | None, + typer.Option( + "--api-key", + help="API key for the LiveKit server or Cloud project.", + envvar="LIVEKIT_API_KEY", + rich_help_panel=PANEL_LIVEKIT, + ), +] + +LiveKitApiSecretArg = Annotated[ + str | None, + typer.Option( + "--api-secret", + help="API secret for the LiveKit server or Cloud project.", + envvar="LIVEKIT_API_SECRET", + rich_help_panel=PANEL_LIVEKIT, + ), +] + +ConnectRoomArg = Annotated[ + str, + typer.Option( + "--room", + help="Room name to connect to (same as LiveKit Agents [code]connect[/code]).", + rich_help_panel=PANEL_LIVEKIT, + ), +] + +ConnectParticipantArg = Annotated[ + str | None, + typer.Option( + "--participant-identity", + help="Agent participant identity when connecting to the room.", + rich_help_panel=PANEL_ADVANCED, + ), +] + +LiveKitLogLevelArg = Annotated[ + str | None, + typer.Option( + "--log-level", + help="Log level (e.g. 
DEBUG, INFO, WARN, ERROR).", + envvar="LIVEKIT_LOG_LEVEL", + case_sensitive=False, + rich_help_panel=PANEL_ADVANCED, ), ] @@ -394,6 +777,7 @@ def list_command( "Memory line is OS-specific (Linux: current VmRSS; macOS: peak " "ru_maxrss, not live RSS—see JSON description)." ), + rich_help_panel=PANEL_ADVANCED, ), ] = False, json_output: Annotated[ @@ -565,68 +949,232 @@ def _build_list_json_payload( return payload -@app.command("start") +_LIVEKIT_CLI_CONTEXT_SETTINGS = { + "allow_extra_args": True, + "ignore_unknown_options": True, +} + + +@app.command("start", context_settings=_LIVEKIT_CLI_CONTEXT_SETTINGS) def start_command( + _ctx: Context, agents_dir: AgentsDirArg, default_stt: DefaultSttArg = None, default_llm: DefaultLlmArg = None, default_tts: DefaultTtsArg = None, default_greeting: DefaultGreetingArg = None, + url: LiveKitUrlArg = None, + api_key: LiveKitApiKeyArg = None, + api_secret: LiveKitApiSecretArg = None, + log_level: LiveKitLogLevelArg = None, dashboard: DashboardArg = False, dashboard_refresh: DashboardRefreshArg = 1.0, metrics_json_file: MetricsJsonFileArg = None, + metrics_jsonl: MetricsJsonlArg = None, + metrics_jsonl_interval: MetricsJsonlIntervalArg = None, ) -> None: - """Run the LiveKit worker (production-style entrypoint).""" - pool = AgentPool( - **_pool_kwargs(default_stt, default_llm, default_tts, default_greeting) - ) - _discover_or_exit(agents_dir, pool) - _livekit_sys_argv("start") - _run_pool_with_reporting( - pool, + """Run the worker (same role as [code]python agent.py start[/code] with LiveKit).""" + _delegate_discovered_pool_to_livekit( + agents_dir=agents_dir, + subcommand="start", + default_stt=default_stt, + default_llm=default_llm, + default_tts=default_tts, + default_greeting=default_greeting, dashboard=dashboard, dashboard_refresh=dashboard_refresh, metrics_json_file=metrics_json_file, + metrics_jsonl=metrics_jsonl, + metrics_jsonl_interval=metrics_jsonl_interval, + url=url, + api_key=api_key, + api_secret=api_secret, + 
log_level=log_level, ) -@app.command("dev") +@app.command("dev", context_settings=_LIVEKIT_CLI_CONTEXT_SETTINGS) def dev_command( + _ctx: Context, agents_dir: AgentsDirArg, default_stt: DefaultSttArg = None, default_llm: DefaultLlmArg = None, default_tts: DefaultTtsArg = None, default_greeting: DefaultGreetingArg = None, + url: LiveKitUrlArg = None, + api_key: LiveKitApiKeyArg = None, + api_secret: LiveKitApiSecretArg = None, + log_level: LiveKitLogLevelArg = None, dashboard: DashboardArg = False, dashboard_refresh: DashboardRefreshArg = 1.0, metrics_json_file: MetricsJsonFileArg = None, + metrics_jsonl: MetricsJsonlArg = None, + metrics_jsonl_interval: MetricsJsonlIntervalArg = None, ) -> None: - """Run the LiveKit worker in development mode.""" - pool = AgentPool( - **_pool_kwargs(default_stt, default_llm, default_tts, default_greeting) + """Development worker with reload (same role as [code]python agent.py dev[/code]).""" + _delegate_discovered_pool_to_livekit( + agents_dir=agents_dir, + subcommand="dev", + default_stt=default_stt, + default_llm=default_llm, + default_tts=default_tts, + default_greeting=default_greeting, + dashboard=dashboard, + dashboard_refresh=dashboard_refresh, + metrics_json_file=metrics_json_file, + metrics_jsonl=metrics_jsonl, + metrics_jsonl_interval=metrics_jsonl_interval, + url=url, + api_key=api_key, + api_secret=api_secret, + log_level=log_level, ) - _discover_or_exit(agents_dir, pool) - _livekit_sys_argv("dev") - _run_pool_with_reporting( - pool, + + +@app.command("console", context_settings=_LIVEKIT_CLI_CONTEXT_SETTINGS) +def console_command( + _ctx: Context, + agents_dir: AgentsDirArg, + default_stt: DefaultSttArg = None, + default_llm: DefaultLlmArg = None, + default_tts: DefaultTtsArg = None, + default_greeting: DefaultGreetingArg = None, + url: LiveKitUrlArg = None, + api_key: LiveKitApiKeyArg = None, + api_secret: LiveKitApiSecretArg = None, + log_level: LiveKitLogLevelArg = None, + dashboard: DashboardArg = False, + 
dashboard_refresh: DashboardRefreshArg = 1.0, + metrics_json_file: MetricsJsonFileArg = None, + metrics_jsonl: MetricsJsonlArg = None, + metrics_jsonl_interval: MetricsJsonlIntervalArg = None, +) -> None: + """Local console session (same role as [code]python agent.py console[/code]).""" + _delegate_discovered_pool_to_livekit( + agents_dir=agents_dir, + subcommand="console", + default_stt=default_stt, + default_llm=default_llm, + default_tts=default_tts, + default_greeting=default_greeting, + dashboard=dashboard, + dashboard_refresh=dashboard_refresh, + metrics_json_file=metrics_json_file, + metrics_jsonl=metrics_jsonl, + metrics_jsonl_interval=metrics_jsonl_interval, + url=url, + api_key=api_key, + api_secret=api_secret, + log_level=log_level, + ) + + +@app.command("connect", context_settings=_LIVEKIT_CLI_CONTEXT_SETTINGS) +def connect_command( + _ctx: Context, + agents_dir: AgentsDirArg, + room: ConnectRoomArg, + default_stt: DefaultSttArg = None, + default_llm: DefaultLlmArg = None, + default_tts: DefaultTtsArg = None, + default_greeting: DefaultGreetingArg = None, + participant_identity: ConnectParticipantArg = None, + log_level: LiveKitLogLevelArg = None, + url: LiveKitUrlArg = None, + api_key: LiveKitApiKeyArg = None, + api_secret: LiveKitApiSecretArg = None, + dashboard: DashboardArg = False, + dashboard_refresh: DashboardRefreshArg = 1.0, + metrics_json_file: MetricsJsonFileArg = None, + metrics_jsonl: MetricsJsonlArg = None, + metrics_jsonl_interval: MetricsJsonlIntervalArg = None, +) -> None: + """Connect the worker to an existing room (LiveKit [code]connect[/code]).""" + _run_connect_handoff( + agents_dir=agents_dir, + default_stt=default_stt, + default_llm=default_llm, + default_tts=default_tts, + default_greeting=default_greeting, + room=room, + participant_identity=participant_identity, + log_level=log_level, + url=url, + api_key=api_key, + api_secret=api_secret, dashboard=dashboard, dashboard_refresh=dashboard_refresh, 
metrics_json_file=metrics_json_file, + metrics_jsonl=metrics_jsonl, + metrics_jsonl_interval=metrics_jsonl_interval, ) +@app.command("download-files") +def download_files_command( + agents_dir: AgentsDirArg, + url: LiveKitUrlArg = None, + api_key: LiveKitApiKeyArg = None, + api_secret: LiveKitApiSecretArg = None, + log_level: LiveKitLogLevelArg = None, +) -> None: + """Download plugin assets (LiveKit [code]download-files[/code]). + + Uses the same discovery path as other commands so the worker entrypoint is + valid; provider defaults are not needed for this subcommand. + """ + _delegate_discovered_pool_to_livekit( + agents_dir=agents_dir, + subcommand="download-files", + default_stt=None, + default_llm=None, + default_tts=None, + default_greeting=None, + dashboard=False, + dashboard_refresh=1.0, + metrics_json_file=None, + metrics_jsonl=None, + metrics_jsonl_interval=None, + url=url, + api_key=api_key, + api_secret=api_secret, + log_level=log_level, + ) + + +@app.command("tui") +def tui_command( + watch: TuiWatchPathArg, + from_start: TuiFromStartArg = False, +) -> None: + """Sidecar Textual UI for a --metrics-jsonl stream (requires the ``tui`` extra).""" + try: + from openrtc.tui_app import run_metrics_tui + except ImportError as exc: + logger.error( + "The TUI requires Textual. Install with: pip install 'openrtc[tui]' " + "(the cli extra is required for the openrtc command)." 
+ ) + raise typer.Exit(code=1) from exc + run_metrics_tui(watch, from_start=from_start) + + def _run_pool_with_reporting( pool: AgentPool, *, dashboard: bool, dashboard_refresh: float, metrics_json_file: Path | None, + metrics_jsonl: Path | None = None, + metrics_jsonl_interval: float | None = None, ) -> None: reporter = RuntimeReporter( pool, dashboard=dashboard, refresh_seconds=dashboard_refresh, json_output_path=metrics_json_file, + metrics_jsonl_path=metrics_jsonl, + metrics_jsonl_interval=metrics_jsonl_interval, ) reporter.start() try: @@ -741,7 +1289,8 @@ def main(argv: list[str] | None = None) -> int: still use CliRunner). Pass ``args`` without the program name when invoking programmatically; ``prog_name`` matches the ``openrtc`` console script. - ``start`` / ``dev`` mutate :data:`sys.argv` before ``pool.run()``; we restore + Worker subcommands (``start``, ``dev``, ``console``, ``connect``, + ``download-files``) mutate :data:`sys.argv` before ``pool.run()``; we restore the previous argv list after the command finishes so programmatic callers are not polluted. """ diff --git a/src/openrtc/metrics_stream.py b/src/openrtc/metrics_stream.py new file mode 100644 index 0000000..4e33f48 --- /dev/null +++ b/src/openrtc/metrics_stream.py @@ -0,0 +1,138 @@ +"""Sidecar metrics stream for workers (JSON Lines over a file or socket). + +Each line is one JSON object (envelope) so a separate TUI or script can tail the +file. This is the contract for ``openrtc tui --watch``. + +**Envelope (schema version 1)** + +* ``schema_version`` (int): bump on breaking payload changes. +* ``kind`` (str): ``"snapshot"`` for full pool state; future kinds may add events. +* ``seq`` (int): monotonically increasing counter for this worker process. +* ``wall_time_unix`` (float): ``time.time()`` when the record was emitted. 
+* ``payload`` (dict): for ``kind == "snapshot"``, same shape as + :meth:`PoolRuntimeSnapshot.to_dict`; for ``kind == "event"``, small dicts such + as ``{"event": "session_started", "agent": "..."}``. +""" + +from __future__ import annotations + +import json +import time +from pathlib import Path +from threading import Lock +from typing import Any + +from openrtc.resources import PoolRuntimeSnapshot + +METRICS_STREAM_SCHEMA_VERSION = 1 +KIND_SNAPSHOT = "snapshot" +KIND_EVENT = "event" + + +def snapshot_envelope(*, seq: int, snapshot: PoolRuntimeSnapshot) -> dict[str, Any]: + """Build a versioned JSON object for one line of the metrics stream.""" + return { + "schema_version": METRICS_STREAM_SCHEMA_VERSION, + "kind": KIND_SNAPSHOT, + "seq": seq, + "wall_time_unix": time.time(), + "payload": snapshot.to_dict(), + } + + +def _metrics_json_seq_ok(value: object) -> bool: + """``seq`` must be a JSON integer (reject bool, which subclasses int).""" + return isinstance(value, int) and not isinstance(value, bool) + + +def _metrics_json_wall_ok(value: object) -> bool: + if isinstance(value, bool): + return False + return isinstance(value, (int, float)) + + +def parse_metrics_jsonl_line(line: str) -> dict[str, Any] | None: + """Return a parsed stream record (snapshot or event), or ``None`` if invalid.""" + stripped = line.strip() + if not stripped: + return None + try: + record: dict[str, Any] = json.loads(stripped) + except json.JSONDecodeError: + return None + if record.get("schema_version") != METRICS_STREAM_SCHEMA_VERSION: + return None + kind = record.get("kind") + if kind not in (KIND_SNAPSHOT, KIND_EVENT): + return None + seq = record.get("seq") + if not _metrics_json_seq_ok(seq): + return None + wall = record.get("wall_time_unix") + if not _metrics_json_wall_ok(wall): + return None + payload = record.get("payload") + if payload is None or not isinstance(payload, dict): + return None + return record + + +def event_envelope(*, seq: int, payload: dict[str, Any]) -> 
dict[str, Any]: + """Build a JSON object for one session lifecycle (or similar) event line.""" + return { + "schema_version": METRICS_STREAM_SCHEMA_VERSION, + "kind": KIND_EVENT, + "seq": seq, + "wall_time_unix": time.time(), + "payload": dict(payload), + } + + +class JsonlMetricsSink: + """Append-only JSONL writer; truncates the file when opened (new worker run).""" + + def __init__(self, path: Path) -> None: + self._path = path + self._file: Any = None + self._seq = 0 + self._lock = Lock() + + def open(self) -> None: + """Create parent dirs and open the JSONL file for writing. + + Uses ``self._path.open("w", ...)``, which **truncates** any existing file. + That is intentional: each worker run starts a fresh stream (see class doc). + """ + self._path.parent.mkdir(parents=True, exist_ok=True) + self._file = self._path.open("w", encoding="utf-8") + + def write_snapshot(self, snapshot: PoolRuntimeSnapshot) -> None: + """Serialize one snapshot line and flush (thread-safe).""" + with self._lock: + if self._file is None: + raise RuntimeError("JsonlMetricsSink.open() was not called") + self._seq += 1 + record = snapshot_envelope(seq=self._seq, snapshot=snapshot) + self._file.write(json.dumps(record, sort_keys=True) + "\n") + self._file.flush() + + def write_event(self, payload: dict[str, Any]) -> None: + """Append one event line after the current ``seq`` (thread-safe).""" + with self._lock: + if self._file is None: + raise RuntimeError("JsonlMetricsSink.open() was not called") + self._seq += 1 + record = event_envelope(seq=self._seq, payload=payload) + self._file.write(json.dumps(record, sort_keys=True) + "\n") + self._file.flush() + + def close(self) -> None: + with self._lock: + if self._file is not None: + self._file.close() + self._file = None + + @property + def seq(self) -> int: + with self._lock: + return self._seq diff --git a/src/openrtc/pool.py b/src/openrtc/pool.py index 5984ef0..6ba761b 100644 --- a/src/openrtc/pool.py +++ b/src/openrtc/pool.py @@ -18,7 
+18,11 @@ from livekit.agents import Agent, AgentServer, AgentSession, JobContext, JobProcess, cli -from openrtc.resources import PoolRuntimeSnapshot, RuntimeMetricsStore +from openrtc.resources import ( + MetricsStreamEvent, + PoolRuntimeSnapshot, + RuntimeMetricsStore, +) logger = logging.getLogger("openrtc") @@ -270,6 +274,10 @@ def runtime_snapshot(self) -> PoolRuntimeSnapshot: """Return a live snapshot of worker metrics for dashboards and automation.""" return self._runtime_state.metrics.snapshot(registered_agents=len(self._agents)) + def drain_metrics_stream_events(self) -> list[MetricsStreamEvent]: + """Drain pending session lifecycle events for JSONL sidecar export.""" + return self._runtime_state.metrics.drain_stream_events() + def add( self, name: str, diff --git a/src/openrtc/resources.py b/src/openrtc/resources.py index acb1c6e..64cb318 100644 --- a/src/openrtc/resources.py +++ b/src/openrtc/resources.py @@ -3,17 +3,34 @@ import logging import sys import time +from collections import deque from collections.abc import Mapping, Sequence from dataclasses import dataclass, field from pathlib import Path from threading import Lock -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, TypedDict if TYPE_CHECKING: from openrtc.pool import AgentConfig logger = logging.getLogger("openrtc") +_STREAM_EVENTS_MAXLEN = 256 + + +class MetricsStreamEvent(TypedDict, total=False): + """One drained session lifecycle row for JSONL export. + + Rows always include ``event`` and ``agent`` from the store; ``session_failed`` + rows may include ``error``. A synthetic ``metrics_stream_overflow`` row may + include ``overflow_dropped``. 
+ """ + + event: str + agent: str + error: str + overflow_dropped: int + @dataclass(frozen=True, slots=True) class AgentDiskFootprint: @@ -109,8 +126,22 @@ class RuntimeMetricsStore: last_error: str | None = None sessions_by_agent: dict[str, int] = field(default_factory=dict) _lock: Lock = field(default_factory=Lock, init=False, repr=False, compare=False) + _stream_events: deque[MetricsStreamEvent] = field( + default_factory=deque, + init=False, + repr=False, + compare=False, + ) + _metrics_stream_overflow_since_drain: int = field( + default=0, + init=False, + repr=False, + compare=False, + ) def __getstate__(self) -> dict[str, object]: + with self._lock: + stream_events = list(self._stream_events) return { "started_at": self.started_at, "total_sessions_started": self.total_sessions_started, @@ -118,6 +149,8 @@ def __getstate__(self) -> dict[str, object]: "last_routed_agent": self.last_routed_agent, "last_error": self.last_error, "sessions_by_agent": dict(self.sessions_by_agent), + "_stream_events": stream_events, + "_metrics_stream_overflow_since_drain": self._metrics_stream_overflow_since_drain, } def __setstate__(self, state: Mapping[str, object]) -> None: @@ -130,8 +163,24 @@ def __setstate__(self, state: Mapping[str, object]) -> None: str(key): int(value) for key, value in dict(state["sessions_by_agent"]).items() } + raw_events = state.get("_stream_events", []) + self._stream_events = deque(raw_events) + self._metrics_stream_overflow_since_drain = int( + state.get("_metrics_stream_overflow_since_drain", 0) + ) self._lock = Lock() + def _append_stream_event_locked(self, event: MetricsStreamEvent) -> None: + if len(self._stream_events) >= _STREAM_EVENTS_MAXLEN: + self._metrics_stream_overflow_since_drain += 1 + logger.warning( + "metrics stream buffer full (%s events); dropping event %r", + _STREAM_EVENTS_MAXLEN, + event.get("event"), + ) + return + self._stream_events.append(event) + def record_session_started(self, agent_name: str) -> None: """Increment active 
counters for one routed session.""" with self._lock: @@ -140,6 +189,9 @@ def record_session_started(self, agent_name: str) -> None: self.sessions_by_agent[agent_name] = ( self.sessions_by_agent.get(agent_name, 0) + 1 ) + self._append_stream_event_locked( + {"event": "session_started", "agent": agent_name}, + ) def record_session_finished(self, agent_name: str) -> None: """Decrement active counters once a session exits.""" @@ -150,6 +202,9 @@ def record_session_finished(self, agent_name: str) -> None: self.sessions_by_agent[agent_name] = next_value else: self.sessions_by_agent.pop(agent_name, None) + self._append_stream_event_locked( + {"event": "session_finished", "agent": agent_name}, + ) def record_session_failure(self, agent_name: str, exc: BaseException) -> None: """Track a failed session attempt with the most recent error.""" @@ -157,6 +212,30 @@ def record_session_failure(self, agent_name: str, exc: BaseException) -> None: self.last_routed_agent = agent_name self.total_session_failures += 1 self.last_error = f"{exc.__class__.__name__}: {exc}" + self._append_stream_event_locked( + { + "event": "session_failed", + "agent": agent_name, + "error": f"{exc.__class__.__name__}: {exc}"[:500], + }, + ) + + def drain_stream_events(self) -> list[MetricsStreamEvent]: + """Remove and return pending stream events for JSONL export (order preserved).""" + with self._lock: + out = list(self._stream_events) + self._stream_events.clear() + dropped = self._metrics_stream_overflow_since_drain + self._metrics_stream_overflow_since_drain = 0 + if dropped > 0: + out.append( + { + "event": "metrics_stream_overflow", + "agent": "__openrtc__", + "overflow_dropped": dropped, + }, + ) + return out def snapshot(self, *, registered_agents: int) -> PoolRuntimeSnapshot: """Return a typed snapshot for dashboards and automation.""" diff --git a/src/openrtc/tui_app.py b/src/openrtc/tui_app.py new file mode 100644 index 0000000..384a6c8 --- /dev/null +++ b/src/openrtc/tui_app.py @@ -0,0 +1,166 
@@ +"""Textual sidecar UI for tailing :mod:`openrtc.metrics_stream` JSONL output.""" + +from __future__ import annotations + +import os +from pathlib import Path +from typing import Any, TextIO + +from textual.app import App, ComposeResult +from textual.widgets import Footer, Header, Static + +from openrtc.metrics_stream import KIND_EVENT, KIND_SNAPSHOT, parse_metrics_jsonl_line + + +class MetricsTuiApp(App[None]): + """Tail ``--metrics-jsonl`` and show live pool metrics.""" + + TITLE = "OpenRTC metrics" + BINDINGS = [("q", "quit", "Quit")] + + def __init__(self, watch_path: Path, *, from_start: bool = False) -> None: + super().__init__() + self._path = watch_path.resolve() + self._from_start = from_start + self._fh: TextIO | None = None + self._buf = "" + self._latest: dict[str, Any] | None = None + self._last_event: dict[str, Any] | None = None + self._path_st_ino: int | None = None + self._path_st_dev: int | None = None + + def compose(self) -> ComposeResult: + yield Header(show_clock=True) + yield Static( + "Waiting for JSONL metrics (run the worker with --metrics-jsonl)…", + id="status", + ) + yield Static("", id="event") + yield Static("", id="agents") + yield Static("", id="detail") + yield Footer() + + def on_mount(self) -> None: + self._path.parent.mkdir(parents=True, exist_ok=True) + if not self._path.exists(): + self._path.touch() + self._open_metrics_file() + self.set_interval(0.25, self._poll_file) + + def on_unmount(self) -> None: + if self._fh is not None: + self._fh.close() + self._fh = None + + def _capture_path_identity(self, st: os.stat_result) -> None: + self._path_st_ino = st.st_ino + self._path_st_dev = st.st_dev + + def _open_metrics_file(self) -> None: + if self._fh is not None: + self._fh.close() + self._fh = None + self._buf = "" + self._fh = self._path.open("r", encoding="utf-8") + if self._from_start: + self._fh.seek(0) + else: + self._fh.seek(0, 2) + st = os.stat(self._path) + self._capture_path_identity(st) + + def 
_sync_metrics_file_handle(self) -> None: + """Reopen the reader after truncation or path replacement so new bytes are visible.""" + try: + st = os.stat(self._path) + except OSError: + return + if self._fh is None: + self._open_metrics_file() + return + pos = self._fh.tell() + identity_ok = ( + self._path_st_ino is not None + and self._path_st_dev is not None + and st.st_ino == self._path_st_ino + and st.st_dev == self._path_st_dev + ) + if not identity_ok or st.st_size < pos: + self._open_metrics_file() + + def _poll_file(self) -> None: + self._sync_metrics_file_handle() + if self._fh is None: + return + chunk = self._fh.read() + if not chunk: + return + self._buf += chunk + while "\n" in self._buf: + line, self._buf = self._buf.split("\n", 1) + rec = parse_metrics_jsonl_line(line) + if rec is None: + continue + if rec.get("kind") == KIND_SNAPSHOT: + self._latest = rec + self._refresh_view() + elif rec.get("kind") == KIND_EVENT: + pl = rec.get("payload") + if isinstance(pl, dict): + self._last_event = pl + self._refresh_event_line() + + def _refresh_event_line(self) -> None: + if self._last_event is None: + return + ev = self.query_one("#event", Static) + ev.update( + "[bold]Last event[/bold] " + + " ".join(f"{k}={v!r}" for k, v in sorted(self._last_event.items())) + ) + + def _refresh_view(self) -> None: + if self._latest is None: + return + payload = self._latest.get("payload") + if not isinstance(payload, dict): + return + seq = self._latest.get("seq") + wall = self._latest.get("wall_time_unix") + wall_s = "n/a" + if wall is not None: + try: + wall_s = f"{float(wall):.3f}" + except (TypeError, ValueError): + wall_s = "n/a" + status = self.query_one("#status", Static) + status.update( + f"seq={seq} wall={wall_s} registered={payload.get('registered_agents')} " + f"active={payload.get('active_sessions')} " + f"uptime={float(payload.get('uptime_seconds', 0)):.1f}s" + ) + sba = payload.get("sessions_by_agent") or {} + if isinstance(sba, dict): + lines = [f" {name}: 
{c}" for name, c in sorted(sba.items())] + body = "\n".join(lines) if lines else " (no per-agent sessions yet)" + else: + body = " (invalid payload)" + agents = self.query_one("#agents", Static) + agents.update("[bold]Sessions by agent[/bold]\n" + body) + route = payload.get("last_routed_agent") + err = payload.get("last_error") + detail = self.query_one("#detail", Static) + detail.update( + f"[bold]Last route[/bold] {route or '—'}\n" + f"[bold]Last error[/bold] {err or '—'}\n" + f"[bold]Totals[/bold] started={payload.get('total_sessions_started')} " + f"failures={payload.get('total_session_failures')}" + ) + + def action_quit(self) -> None: + self.exit() + + +def run_metrics_tui(watch_path: Path, *, from_start: bool = False) -> None: + """Run the Textual app until the user quits.""" + MetricsTuiApp(watch_path, from_start=from_start).run() diff --git a/tests/conftest.py b/tests/conftest.py index af55de9..ec02d51 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -78,3 +78,40 @@ def run_app(self, server: AgentServer) -> None: sys.modules["livekit"] = livekit_module sys.modules["livekit.agents"] = agents_module + + +import pytest + +from openrtc.resources import ( + PoolRuntimeSnapshot, + ProcessResidentSetInfo, + SavingsEstimate, +) + + +@pytest.fixture +def minimal_pool_runtime_snapshot() -> PoolRuntimeSnapshot: + """Small :class:`PoolRuntimeSnapshot` for metrics stream / TUI tests.""" + return PoolRuntimeSnapshot( + timestamp=1.0, + uptime_seconds=0.5, + registered_agents=1, + active_sessions=0, + total_sessions_started=0, + total_session_failures=0, + last_routed_agent=None, + last_error=None, + sessions_by_agent={}, + resident_set=ProcessResidentSetInfo( + bytes_value=1024, + metric="test", + description="test", + ), + savings_estimate=SavingsEstimate( + agent_count=1, + shared_worker_bytes=1024, + estimated_separate_workers_bytes=1024, + estimated_saved_bytes=0, + assumptions=(), + ), + ) diff --git a/tests/test_cli.py b/tests/test_cli.py index 
c2eee16..b988aea 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -1,8 +1,11 @@ from __future__ import annotations +import builtins import importlib import json import logging +import os +import re import sys from dataclasses import dataclass from pathlib import Path @@ -14,11 +17,20 @@ from openrtc.cli import app, main from openrtc.resources import ( + MetricsStreamEvent, PoolRuntimeSnapshot, ProcessResidentSetInfo, SavingsEstimate, ) +# Rich/Click may inject ANSI and soft-wrap error text; normalize before substring checks. +_ANSI_ESCAPE_RE = re.compile(r"\x1b\[[0-?]*[ -/]*[@-~]") + + +def _normalize_cli_output_for_assert(text: str) -> str: + plain = _ANSI_ESCAPE_RE.sub("", text) + return plain.replace("\n", "").replace("\r", "") + @dataclass class StubConfig: @@ -60,6 +72,9 @@ def discover(self, agents_dir: Path) -> list[StubConfig]: def run(self) -> None: self.run_called = True + def drain_metrics_stream_events(self) -> list[MetricsStreamEvent]: + return [] + def runtime_snapshot(self) -> PoolRuntimeSnapshot: self.runtime_snapshot_calls += 1 return PoolRuntimeSnapshot( @@ -185,10 +200,20 @@ def build_pool(**kwargs: Any) -> StubPool: assert created_pools[0].default_greeting == "Hello from OpenRTC." 
-@pytest.mark.parametrize("command", ["start", "dev"]) +@pytest.mark.parametrize( + ("command", "extra_args"), + [ + ("start", ["--agents-dir", "./agents"]), + ("dev", ["--agents-dir", "./agents"]), + ("console", ["--agents-dir", "./agents"]), + ("download-files", ["--agents-dir", "./agents"]), + ("connect", ["--agents-dir", "./agents", "--room", "demo-room"]), + ], +) def test_run_commands_inject_livekit_mode_and_run_pool( monkeypatch: pytest.MonkeyPatch, command: str, + extra_args: list[str], original_argv: list[str], ) -> None: stub_pool = StubPool( @@ -197,7 +222,7 @@ def test_run_commands_inject_livekit_mode_and_run_pool( monkeypatch.setattr("openrtc.cli_app.AgentPool", lambda **kwargs: stub_pool) monkeypatch.setattr(sys, "argv", original_argv.copy()) - exit_code = main([command, "--agents-dir", "./agents"]) + exit_code = main([command, *extra_args]) assert exit_code == 0 assert stub_pool.run_called is True @@ -216,6 +241,27 @@ def test_cli_returns_non_zero_when_no_agents_are_discovered( assert exit_code == 1 +def test_download_files_has_minimal_options_no_provider_defaults( + tmp_path: Path, +) -> None: + """download-files only needs agents dir + connection; no --default-* flags.""" + runner = CliRunner() + result = runner.invoke( + app, + [ + "download-files", + "--agents-dir", + str(tmp_path), + "--default-stt", + "deepgram/x", + ], + ) + assert result.exit_code == 2 + out = (result.stdout or "") + (result.stderr or "") + normalized = _normalize_cli_output_for_assert(out) + assert re.search(r"default[-_]stt", normalized), normalized[:800] + + def test_list_exits_cleanly_when_agents_dir_does_not_exist( tmp_path: Path, caplog: pytest.LogCaptureFixture, @@ -228,6 +274,104 @@ def test_list_exits_cleanly_when_agents_dir_does_not_exist( assert "does not exist" in caplog.text +def test_strip_openrtc_only_flags_for_livekit_removes_openrtc_options() -> None: + """LiveKit ``run_app`` must not see OpenRTC-only flags (see ``_livekit_sys_argv``).""" + from 
openrtc.cli_app import _strip_openrtc_only_flags_for_livekit + + tail = [ + "--agents-dir", + "./agents", + "--dashboard", + "--dashboard-refresh", + "2.0", + "--metrics-json-file", + "/tmp/m.json", + "--default-stt", + "x", + "--default-llm", + "y", + "--default-tts", + "z", + "--default-greeting", + "hi", + "--metrics-jsonl", + "/tmp/x.jsonl", + "--metrics-jsonl-interval", + "0.5", + "--reload", + "--log-level", + "DEBUG", + ] + assert _strip_openrtc_only_flags_for_livekit(tail) == [ + "--reload", + "--log-level", + "DEBUG", + ] + assert _strip_openrtc_only_flags_for_livekit(["--agents-dir=./a", "--reload"]) == [ + "--reload" + ] + assert _strip_openrtc_only_flags_for_livekit([]) == [] + assert _strip_openrtc_only_flags_for_livekit( + ["--metrics-json-file", "--not-a-flag", "--reload"], + ) == ["--reload"] + + +def test_dev_passes_reload_through_argv_strip( + monkeypatch: pytest.MonkeyPatch, + tmp_path: Path, +) -> None: + import openrtc.cli_app as cli_app_mod + + agents = tmp_path / "agents" + agents.mkdir() + stub_pool = StubPool(discovered=[StubConfig(name="a", agent_cls=StubAgent)]) + monkeypatch.setattr(cli_app_mod, "AgentPool", lambda **kwargs: stub_pool) + + def _run_pool_stub(pool: StubPool, **kwargs: Any) -> None: + pool.run() + + monkeypatch.setattr(cli_app_mod, "_run_pool_with_reporting", _run_pool_stub) + real_strip = cli_app_mod._strip_openrtc_only_flags_for_livekit + recorded: list[tuple[list[str], list[str]]] = [] + + def recording_strip(tail: list[str]) -> list[str]: + out = real_strip(tail) + recorded.append((list(tail), list(out))) + return out + + monkeypatch.setattr( + cli_app_mod, + "_strip_openrtc_only_flags_for_livekit", + recording_strip, + ) + monkeypatch.setattr( + sys, + "argv", + ["openrtc", "dev", "--agents-dir", str(agents), "--reload"], + ) + exit_code = main(["dev", "--agents-dir", str(agents), "--reload"]) + assert exit_code == 0 + assert stub_pool.run_called + assert recorded + assert recorded[0][1] == ["--reload"] + + +def 
test_livekit_env_restored_after_delegate_returns( + monkeypatch: pytest.MonkeyPatch, +) -> None: + import openrtc.cli_app as cli_app_mod + + stub_pool = StubPool(discovered=[StubConfig(name="a", agent_cls=StubAgent)]) + monkeypatch.setattr(cli_app_mod, "AgentPool", lambda **kwargs: stub_pool) + monkeypatch.setattr(cli_app_mod, "_run_pool_with_reporting", lambda *a, **k: None) + monkeypatch.setenv("LIVEKIT_URL", "ws://persist") + exit_code = main( + ["start", "--agents-dir", "./agents", "--url", "ws://temporary-override"], + ) + assert exit_code == 0 + assert os.environ.get("LIVEKIT_URL") == "ws://persist" + + def test_cli_entrypoint_documents_optional_extra() -> None: from openrtc.cli import CLI_EXTRA_INSTALL_HINT @@ -404,3 +548,64 @@ def test_start_command_can_write_runtime_metrics_json( assert data["active_sessions"] == 1 assert data["registered_agents"] == 1 assert data["sessions_by_agent"]["restaurant"] == 1 + + +def test_start_command_metrics_jsonl_writes_snapshot_records( + monkeypatch: pytest.MonkeyPatch, + tmp_path: Path, +) -> None: + """``--metrics-jsonl`` produces JSON Lines the sidecar TUI can tail.""" + jsonl = tmp_path / "sidecar.jsonl" + stub_pool = StubPool( + discovered=[StubConfig(name="restaurant", agent_cls=StubAgent)] + ) + monkeypatch.setattr("openrtc.cli_app.AgentPool", lambda **kwargs: stub_pool) + + runner = CliRunner() + result = runner.invoke( + app, + [ + "start", + "--agents-dir", + "./agents", + "--metrics-jsonl", + str(jsonl), + "--metrics-jsonl-interval", + "0.3", + ], + ) + + assert result.exit_code == 0 + assert stub_pool.run_called is True + lines = [ln for ln in jsonl.read_text(encoding="utf-8").split("\n") if ln.strip()] + assert len(lines) >= 1 + first = json.loads(lines[0]) + assert first["schema_version"] == 1 + assert first["kind"] == "snapshot" + assert "payload" in first + assert first["payload"]["registered_agents"] == 1 + + +def test_tui_command_exits_when_textual_is_not_importable( + monkeypatch: pytest.MonkeyPatch, + 
caplog: pytest.LogCaptureFixture, +) -> None: + """``openrtc tui`` fails fast with a clear message if the TUI extra is absent.""" + real_import = builtins.__import__ + + def guard(name: str, *args: object, **kwargs: object) -> object: + if name == "openrtc.tui_app": + raise ImportError("simulated missing textual") + return real_import(name, *args, **kwargs) + + monkeypatch.setattr(builtins, "__import__", guard) + runner = CliRunner() + with caplog.at_level(logging.ERROR, logger="openrtc"): + result = runner.invoke( + app, + ["tui", "--watch", "./metrics.jsonl"], + catch_exceptions=False, + ) + assert result.exit_code == 1 + assert "Textual" in caplog.text + assert "openrtc[tui]" in caplog.text diff --git a/tests/test_metrics_stream.py b/tests/test_metrics_stream.py new file mode 100644 index 0000000..a2bec06 --- /dev/null +++ b/tests/test_metrics_stream.py @@ -0,0 +1,335 @@ +"""Tests for JSONL metrics stream, sink, and RuntimeReporter export.""" + +from __future__ import annotations + +import json +import logging +import time +from pathlib import Path + +import pytest + +from openrtc.cli_app import RuntimeReporter +from openrtc.metrics_stream import ( + KIND_EVENT, + KIND_SNAPSHOT, + METRICS_STREAM_SCHEMA_VERSION, + JsonlMetricsSink, + parse_metrics_jsonl_line, + snapshot_envelope, +) +from openrtc.resources import ( + MetricsStreamEvent, + PoolRuntimeSnapshot, +) + + +def _read_jsonl_lines(path: Path) -> list[str]: + if not path.exists(): + return [] + return [ln for ln in path.read_text(encoding="utf-8").split("\n") if ln.strip()] + + +def _wait_for_jsonl_lines( + path: Path, + *, + min_lines: int, + timeout: float = 5.0, + poll_interval: float = 0.02, +) -> list[str]: + deadline = time.monotonic() + timeout + while time.monotonic() < deadline: + lines = _read_jsonl_lines(path) + if len(lines) >= min_lines: + return lines + time.sleep(poll_interval) + raise AssertionError( + f"timed out after {timeout}s waiting for {min_lines} JSONL line(s) in {path!s}", + ) + + 
+class _StubPool: + def __init__(self, snapshot: PoolRuntimeSnapshot) -> None: + self._snap = snapshot + + def drain_metrics_stream_events(self) -> list[MetricsStreamEvent]: + return [] + + def runtime_snapshot(self) -> PoolRuntimeSnapshot: + return self._snap + + +def test_parse_metrics_jsonl_line() -> None: + good = json.dumps( + { + "schema_version": METRICS_STREAM_SCHEMA_VERSION, + "kind": KIND_SNAPSHOT, + "seq": 9, + "wall_time_unix": 12.0, + "payload": {"registered_agents": 0}, + } + ) + parsed = parse_metrics_jsonl_line(good) + assert parsed is not None + assert parsed["seq"] == 9 + assert parse_metrics_jsonl_line("") is None + assert parse_metrics_jsonl_line("not-json") is None + assert parse_metrics_jsonl_line('{"schema_version": 999}') is None + + +def test_parse_metrics_jsonl_line_rejects_bool_wall_time() -> None: + bad = json.dumps( + { + "schema_version": METRICS_STREAM_SCHEMA_VERSION, + "kind": KIND_SNAPSHOT, + "seq": 1, + "wall_time_unix": True, + "payload": {}, + } + ) + assert parse_metrics_jsonl_line(bad) is None + + +def test_jsonl_sink_seq_property( + tmp_path: Path, + minimal_pool_runtime_snapshot: PoolRuntimeSnapshot, +) -> None: + """Cover :attr:`JsonlMetricsSink.seq` (lock + counter).""" + snap = minimal_pool_runtime_snapshot + path = tmp_path / "seq.jsonl" + sink = JsonlMetricsSink(path) + sink.open() + assert sink.seq == 0 + sink.write_snapshot(snap) + assert sink.seq == 1 + sink.write_event({"event": "x"}) + assert sink.seq == 2 + sink.close() + + +def test_parse_metrics_jsonl_line_rejects_malformed_envelope() -> None: + base = { + "schema_version": METRICS_STREAM_SCHEMA_VERSION, + "kind": KIND_SNAPSHOT, + "seq": 1, + "wall_time_unix": 1.0, + "payload": {"x": 1}, + } + bad_seq = {**base, "seq": True} + assert parse_metrics_jsonl_line(json.dumps(bad_seq)) is None + bad_wall = {**base, "wall_time_unix": None} + assert parse_metrics_jsonl_line(json.dumps(bad_wall)) is None + bad_payload = {**base, "payload": None} + assert 
parse_metrics_jsonl_line(json.dumps(bad_payload)) is None + bad_payload2 = {**base, "payload": [1, 2]} + assert parse_metrics_jsonl_line(json.dumps(bad_payload2)) is None + + +def test_parse_metrics_jsonl_line_rejects_unknown_kind() -> None: + bad = json.dumps( + { + "schema_version": METRICS_STREAM_SCHEMA_VERSION, + "kind": "future-kind", + "seq": 1, + "wall_time_unix": 0.0, + "payload": {}, + } + ) + assert parse_metrics_jsonl_line(bad) is None + + +def test_jsonl_metrics_sink_requires_open_before_write( + tmp_path: Path, + minimal_pool_runtime_snapshot: PoolRuntimeSnapshot, +) -> None: + sink = JsonlMetricsSink(tmp_path / "unopened.jsonl") + with pytest.raises(RuntimeError, match="open"): + sink.write_snapshot(minimal_pool_runtime_snapshot) + with pytest.raises(RuntimeError, match="open"): + sink.write_event({"event": "x"}) + + +def test_parse_metrics_jsonl_line_accepts_event() -> None: + line = json.dumps( + { + "schema_version": METRICS_STREAM_SCHEMA_VERSION, + "kind": "event", + "seq": 2, + "wall_time_unix": 3.0, + "payload": {"event": "session_started", "agent": "x"}, + }, + sort_keys=True, + ) + rec = parse_metrics_jsonl_line(line) + assert rec is not None + assert rec["kind"] == "event" + assert rec["payload"]["agent"] == "x" + + +def test_jsonl_sink_writes_snapshot_then_event( + tmp_path: Path, + minimal_pool_runtime_snapshot: PoolRuntimeSnapshot, +) -> None: + path = tmp_path / "e.jsonl" + sink = JsonlMetricsSink(path) + sink.open() + sink.write_snapshot(minimal_pool_runtime_snapshot) + sink.write_event({"event": "session_finished", "agent": "a"}) + sink.close() + lines = path.read_text(encoding="utf-8").strip().split("\n") + assert len(lines) == 2 + assert json.loads(lines[0])["kind"] == KIND_SNAPSHOT + assert json.loads(lines[1])["kind"] == "event" + assert json.loads(lines[1])["seq"] == 2 + + +def test_runtime_metrics_store_drains_stream_events() -> None: + from openrtc.resources import RuntimeMetricsStore + + store = RuntimeMetricsStore() + 
store.record_session_started("dental") + drained = store.drain_stream_events() + assert drained == [{"event": "session_started", "agent": "dental"}] + assert store.drain_stream_events() == [] + + +def test_runtime_metrics_store_overflow_emits_synthetic_on_drain( + monkeypatch: pytest.MonkeyPatch, + caplog: pytest.LogCaptureFixture, +) -> None: + from openrtc import resources as resources_mod + from openrtc.resources import RuntimeMetricsStore + + monkeypatch.setattr(resources_mod, "_STREAM_EVENTS_MAXLEN", 3) + store = RuntimeMetricsStore() + with caplog.at_level(logging.WARNING, logger="openrtc"): + for _ in range(6): + store.record_session_started("x") + drained = store.drain_stream_events() + assert len([e for e in drained if e.get("event") == "session_started"]) == 3 + overflow_rows = [e for e in drained if e.get("event") == "metrics_stream_overflow"] + assert len(overflow_rows) == 1 + assert overflow_rows[0].get("overflow_dropped") == 3 + assert "metrics stream buffer full" in caplog.text + assert store.drain_stream_events() == [] + + +def test_snapshot_envelope_shape( + minimal_pool_runtime_snapshot: PoolRuntimeSnapshot, +) -> None: + snap = minimal_pool_runtime_snapshot + env = snapshot_envelope(seq=7, snapshot=snap) + assert env["schema_version"] == METRICS_STREAM_SCHEMA_VERSION + assert env["kind"] == KIND_SNAPSHOT + assert env["seq"] == 7 + assert isinstance(env["wall_time_unix"], float) + assert env["payload"] == snap.to_dict() + + +def test_jsonl_sink_truncates_on_open_and_increments_seq( + tmp_path: Path, + minimal_pool_runtime_snapshot: PoolRuntimeSnapshot, +) -> None: + path = tmp_path / "stream.jsonl" + sink = JsonlMetricsSink(path) + sink.open() + snap = minimal_pool_runtime_snapshot + sink.write_snapshot(snap) + sink.write_snapshot(snap) + sink.close() + + lines = path.read_text(encoding="utf-8").strip().split("\n") + assert len(lines) == 2 + a, b = (json.loads(line) for line in lines) + assert a["seq"] == 1 + assert b["seq"] == 2 + + +def 
test_jsonl_sink_new_open_truncates_previous_file( + tmp_path: Path, + minimal_pool_runtime_snapshot: PoolRuntimeSnapshot, +) -> None: + path = tmp_path / "stream.jsonl" + sink1 = JsonlMetricsSink(path) + sink1.open() + sink1.write_snapshot(minimal_pool_runtime_snapshot) + sink1.close() + + sink2 = JsonlMetricsSink(path) + sink2.open() + sink2.write_snapshot(minimal_pool_runtime_snapshot) + sink2.close() + + lines = path.read_text(encoding="utf-8").strip().split("\n") + assert len(lines) == 1 + assert json.loads(lines[0])["seq"] == 1 + + +def test_runtime_reporter_emits_snapshot_then_drained_events_in_order( + tmp_path: Path, + minimal_pool_runtime_snapshot: PoolRuntimeSnapshot, +) -> None: + """Each tick writes one snapshot line, then any events from the pool (order).""" + + class _PoolWithOneEvent: + def __init__(self, snap: PoolRuntimeSnapshot) -> None: + self._snap = snap + self._sent = False + + def runtime_snapshot(self) -> PoolRuntimeSnapshot: + return self._snap + + def drain_metrics_stream_events(self) -> list[MetricsStreamEvent]: + if self._sent: + return [] + self._sent = True + return [{"event": "session_started", "agent": "demo"}] + + path = tmp_path / "ordered.jsonl" + pool = _PoolWithOneEvent(minimal_pool_runtime_snapshot) + reporter = RuntimeReporter( + pool, + dashboard=False, + refresh_seconds=0.25, + json_output_path=None, + metrics_jsonl_path=path, + metrics_jsonl_interval=0.25, + ) + reporter.start() + try: + lines = _wait_for_jsonl_lines(path, min_lines=2, timeout=5.0) + finally: + reporter.stop() + + first = json.loads(lines[0]) + assert first["kind"] == KIND_SNAPSHOT + assert first["seq"] >= 1 + second = json.loads(lines[1]) + assert second["kind"] == KIND_EVENT + assert second["payload"]["event"] == "session_started" + + +def test_runtime_reporter_emits_jsonl_periodically( + tmp_path: Path, + minimal_pool_runtime_snapshot: PoolRuntimeSnapshot, +) -> None: + path = tmp_path / "live.jsonl" + pool = _StubPool(minimal_pool_runtime_snapshot) + 
reporter = RuntimeReporter( + pool, + dashboard=False, + refresh_seconds=0.25, + json_output_path=None, + metrics_jsonl_path=path, + metrics_jsonl_interval=0.25, + ) + reporter.start() + try: + lines = _wait_for_jsonl_lines(path, min_lines=2, timeout=5.0) + finally: + reporter.stop() + + first = json.loads(lines[0]) + last = json.loads(lines[-1]) + assert first["schema_version"] == METRICS_STREAM_SCHEMA_VERSION + assert last["seq"] > first["seq"] diff --git a/tests/test_pool.py b/tests/test_pool.py index dcef602..592025b 100644 --- a/tests/test_pool.py +++ b/tests/test_pool.py @@ -588,3 +588,9 @@ def __init__(self, **kwargs: object) -> None: assert snapshot.total_sessions_started == 0 assert snapshot.total_session_failures == 0 assert snapshot.sessions_by_agent == {} + + +def test_drain_metrics_stream_events_delegates_to_runtime_store() -> None: + pool = AgentPool() + pool.add("test", DemoAgent, stt="a", llm="b", tts="c") + assert pool.drain_metrics_stream_events() == [] diff --git a/tests/test_tui_app.py b/tests/test_tui_app.py new file mode 100644 index 0000000..8ddd16d --- /dev/null +++ b/tests/test_tui_app.py @@ -0,0 +1,366 @@ +"""Tests for the Textual sidecar ``openrtc tui --watch`` (requires Textual).""" + +from __future__ import annotations + +import json +import os +from pathlib import Path + +import pytest + +from openrtc.metrics_stream import snapshot_envelope +from openrtc.resources import PoolRuntimeSnapshot + +pytest.importorskip("textual") + + +@pytest.mark.asyncio +async def test_metrics_tui_displays_event_line(tmp_path) -> None: + from openrtc.metrics_stream import event_envelope + from openrtc.tui_app import MetricsTuiApp + + path = tmp_path / "ev.jsonl" + ev = json.dumps( + event_envelope(seq=2, payload={"event": "session_started", "agent": "a"}), + sort_keys=True, + ) + path.write_text(ev + "\n", encoding="utf-8") + + app = MetricsTuiApp(path, from_start=True) + async with app.run_test() as pilot: + app._poll_file() + await pilot.pause() + 
event_w = app.query_one("#event") + text = str(event_w.renderable) + assert "session_started" in text + assert "agent" in text + assert "a" in text + + +@pytest.mark.asyncio +async def test_metrics_tui_skips_malformed_line_then_parses_valid( + tmp_path, + minimal_pool_runtime_snapshot: PoolRuntimeSnapshot, +) -> None: + from openrtc.tui_app import MetricsTuiApp + + path = tmp_path / "mix.jsonl" + snap = minimal_pool_runtime_snapshot + good = json.dumps(snapshot_envelope(seq=1, snapshot=snap), sort_keys=True) + path.write_text("not-valid-json\n" + good + "\n", encoding="utf-8") + + app = MetricsTuiApp(path, from_start=True) + async with app.run_test() as pilot: + app._poll_file() + await pilot.pause() + status = app.query_one("#status") + assert "seq=1" in str(status.renderable) + + +@pytest.mark.asyncio +async def test_metrics_tui_displays_snapshot_line( + tmp_path, + minimal_pool_runtime_snapshot: PoolRuntimeSnapshot, +) -> None: + from openrtc.tui_app import MetricsTuiApp + + path = tmp_path / "stream.jsonl" + snap = minimal_pool_runtime_snapshot + line = json.dumps(snapshot_envelope(seq=1, snapshot=snap), sort_keys=True) + path.write_text(line + "\n", encoding="utf-8") + + app = MetricsTuiApp(path, from_start=True) + async with app.run_test() as pilot: + app._poll_file() + await pilot.pause() + status = app.query_one("#status") + text = str(status.renderable) + assert "seq=1" in text + assert "registered=1" in text + + +@pytest.mark.asyncio +async def test_metrics_tui_reopens_after_writer_truncates_file( + tmp_path, + minimal_pool_runtime_snapshot: PoolRuntimeSnapshot, +) -> None: + from openrtc.tui_app import MetricsTuiApp + + path = tmp_path / "rot.jsonl" + snap = minimal_pool_runtime_snapshot + first = json.dumps(snapshot_envelope(seq=1, snapshot=snap), sort_keys=True) + path.write_text(first + "\n", encoding="utf-8") + + app = MetricsTuiApp(path, from_start=True) + async with app.run_test() as pilot: + app._poll_file() + await pilot.pause() + assert "seq=1" 
in str(app.query_one("#status").renderable) + + path.unlink() + second = json.dumps(snapshot_envelope(seq=2, snapshot=snap), sort_keys=True) + path.write_text(second + "\n", encoding="utf-8") + app._poll_file() + await pilot.pause() + assert "seq=2" in str(app.query_one("#status").renderable) + + +@pytest.mark.asyncio +async def test_metrics_tui_creates_watch_file_when_missing(tmp_path: Path) -> None: + from openrtc.tui_app import MetricsTuiApp + + watch = tmp_path / "nested" / "metrics.jsonl" + app = MetricsTuiApp(watch, from_start=True) + async with app.run_test(): + assert watch.is_file() + + +@pytest.mark.asyncio +async def test_metrics_tui_tail_mode_seeks_to_end_then_reads_appends( + tmp_path: Path, + minimal_pool_runtime_snapshot: PoolRuntimeSnapshot, +) -> None: + from openrtc.tui_app import MetricsTuiApp + + path = tmp_path / "tail.jsonl" + snap = minimal_pool_runtime_snapshot + path.write_text( + json.dumps(snapshot_envelope(seq=1, snapshot=snap), sort_keys=True) + "\n", + encoding="utf-8", + ) + app = MetricsTuiApp(path, from_start=False) + async with app.run_test() as pilot: + assert app._fh is not None + assert app._fh.tell() == path.stat().st_size + more = ( + json.dumps(snapshot_envelope(seq=2, snapshot=snap), sort_keys=True) + "\n" + ) + path.write_text(path.read_text(encoding="utf-8") + more, encoding="utf-8") + app._poll_file() + await pilot.pause() + assert "seq=2" in str(app.query_one("#status").renderable) + + +@pytest.mark.asyncio +async def test_metrics_tui_poll_returns_early_when_no_new_bytes( + tmp_path: Path, + minimal_pool_runtime_snapshot: PoolRuntimeSnapshot, +) -> None: + from openrtc.tui_app import MetricsTuiApp + + path = tmp_path / "empty_poll.jsonl" + snap = minimal_pool_runtime_snapshot + path.write_text( + json.dumps(snapshot_envelope(seq=1, snapshot=snap), sort_keys=True) + "\n", + encoding="utf-8", + ) + app = MetricsTuiApp(path, from_start=True) + async with app.run_test() as pilot: + app._poll_file() + await pilot.pause() + 
app._poll_file() + await pilot.pause() + assert "seq=1" in str(app.query_one("#status").renderable) + + +@pytest.mark.asyncio +async def test_metrics_tui_sync_opens_when_handle_cleared( + tmp_path: Path, + minimal_pool_runtime_snapshot: PoolRuntimeSnapshot, +) -> None: + from openrtc.tui_app import MetricsTuiApp + + path = tmp_path / "reopen.jsonl" + snap = minimal_pool_runtime_snapshot + path.write_text( + json.dumps(snapshot_envelope(seq=1, snapshot=snap), sort_keys=True) + "\n", + encoding="utf-8", + ) + app = MetricsTuiApp(path, from_start=True) + async with app.run_test() as pilot: + app._fh.close() + app._fh = None + app._poll_file() + await pilot.pause() + assert app._fh is not None + assert "seq=1" in str(app.query_one("#status").renderable) + + +@pytest.mark.asyncio +async def test_metrics_tui_refresh_event_line_noop_without_event( + tmp_path: Path, +) -> None: + from openrtc.tui_app import MetricsTuiApp + + path = tmp_path / "no_ev.jsonl" + path.touch() + app = MetricsTuiApp(path, from_start=True) + async with app.run_test() as pilot: + app._last_event = None + app._refresh_event_line() + await pilot.pause() + + +@pytest.mark.asyncio +async def test_metrics_tui_refresh_view_noop_when_latest_missing( + tmp_path: Path, +) -> None: + from openrtc.tui_app import MetricsTuiApp + + path = tmp_path / "no_latest.jsonl" + path.touch() + app = MetricsTuiApp(path, from_start=True) + async with app.run_test() as pilot: + app._latest = None + app._refresh_view() + await pilot.pause() + + +@pytest.mark.asyncio +async def test_metrics_tui_sync_ignores_stat_oserror( + tmp_path: Path, + monkeypatch: pytest.MonkeyPatch, +) -> None: + import openrtc.tui_app as tu + from openrtc.tui_app import MetricsTuiApp + + path = tmp_path / "stat_err.jsonl" + path.touch() + real_stat = os.stat + armed = {"on": False} + + target = os.fspath(path) + + def stat_fn( + p: str | os.PathLike[str], + *args: object, + **kwargs: object, + ) -> os.stat_result: + if armed["on"] and os.fspath(p) == 
target: + raise OSError("stat failed") + return real_stat(p, *args, **kwargs) + + monkeypatch.setattr(tu.os, "stat", stat_fn) + app = MetricsTuiApp(path, from_start=True) + async with app.run_test() as pilot: + armed["on"] = True + app._poll_file() + await pilot.pause() + + +@pytest.mark.asyncio +async def test_metrics_tui_refresh_view_skips_bad_payload_shapes( + tmp_path: Path, + minimal_pool_runtime_snapshot: PoolRuntimeSnapshot, +) -> None: + from openrtc.tui_app import MetricsTuiApp + + path = tmp_path / "bad_payload.jsonl" + path.touch() + app = MetricsTuiApp(path, from_start=True) + async with app.run_test() as pilot: + app._latest = {"payload": "not-a-dict"} + app._refresh_view() + app._latest = { + "seq": 9, + "wall_time_unix": 1.0, + "payload": { + "registered_agents": 1, + "active_sessions": 0, + "uptime_seconds": 1.0, + "sessions_by_agent": [1, 2], + "last_routed_agent": None, + "last_error": None, + "total_sessions_started": 0, + "total_session_failures": 0, + }, + } + app._refresh_view() + await pilot.pause() + text = str(app.query_one("#agents").renderable) + assert "invalid payload" in text + + +@pytest.mark.asyncio +async def test_metrics_tui_wall_time_invalid_falls_back_to_na( + tmp_path: Path, + minimal_pool_runtime_snapshot: PoolRuntimeSnapshot, +) -> None: + from openrtc.tui_app import MetricsTuiApp + + path = tmp_path / "wall.jsonl" + path.touch() + app = MetricsTuiApp(path, from_start=True) + snap = minimal_pool_runtime_snapshot + async with app.run_test() as pilot: + app._latest = { + "seq": 3, + "wall_time_unix": "not-numeric", + "payload": snap.to_dict(), + } + app._refresh_view() + await pilot.pause() + assert "wall=n/a" in str(app.query_one("#status").renderable) + + +@pytest.mark.asyncio +async def test_metrics_tui_action_quit_exits(tmp_path: Path) -> None: + from openrtc.tui_app import MetricsTuiApp + + path = tmp_path / "quit.jsonl" + path.touch() + app = MetricsTuiApp(path, from_start=True) + async with app.run_test() as pilot: + 
app.action_quit() + await pilot.pause() + + +def test_run_metrics_tui_calls_app_run( + tmp_path: Path, monkeypatch: pytest.MonkeyPatch +) -> None: + import openrtc.tui_app as tu + + ran: list[object] = [] + + def fake_run(self: object) -> None: + ran.append(self) + + monkeypatch.setattr(tu.MetricsTuiApp, "run", fake_run) + p = tmp_path / "x.jsonl" + p.touch() + tu.run_metrics_tui(p, from_start=True) + assert len(ran) == 1 + assert getattr(ran[0], "_path", None) == p.resolve() + + +@pytest.mark.asyncio +async def test_metrics_tui_poll_returns_when_open_does_not_restore_handle( + tmp_path: Path, + monkeypatch: pytest.MonkeyPatch, +) -> None: + from openrtc.tui_app import MetricsTuiApp + + path = tmp_path / "noop_open.jsonl" + path.touch() + app = MetricsTuiApp(path, from_start=True) + async with app.run_test(): + + def noop_open() -> None: + app._fh = None + app._buf = "" + + monkeypatch.setattr(app, "_open_metrics_file", noop_open) + app._fh = None + app._poll_file() + + +@pytest.mark.asyncio +async def test_metrics_tui_on_unmount_closes_file_handle(tmp_path: Path) -> None: + from openrtc.tui_app import MetricsTuiApp + + path = tmp_path / "um.jsonl" + path.touch() + app = MetricsTuiApp(path, from_start=True) + async with app.run_test(): + assert app._fh is not None + assert app._fh is None diff --git a/uv.lock b/uv.lock index 42a32eb..0fc073a 100644 --- a/uv.lock +++ b/uv.lock @@ -954,6 +954,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d3/97/68f80ca3ac4924f250cdfa6e20142a803e5e50fca96ef5148c52ee8c10ea/librt-0.8.1-cp313-cp313-win_arm64.whl", hash = "sha256:924817ab3141aca17893386ee13261f1d100d1ef410d70afe4389f2359fea4f0", size = 52495, upload-time = "2026-02-17T16:12:11.633Z" }, ] +[[package]] +name = "linkify-it-py" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "uc-micro-py" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/2e/c9/06ea13676ef354f0af6169587ae292d3e2406e212876a413bf9eece4eb23/linkify_it_py-2.1.0.tar.gz", hash = "sha256:43360231720999c10e9328dc3691160e27a718e280673d444c38d7d3aaa3b98b", size = 29158, upload-time = "2026-03-01T07:48:47.683Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b4/de/88b3be5c31b22333b3ca2f6ff1de4e863d8fe45aaea7485f591970ec1d3e/linkify_it_py-2.1.0-py3-none-any.whl", hash = "sha256:0d252c1594ecba2ecedc444053db5d3a9b7ec1b0dd929c8f1d74dce89f86c05e", size = 19878, upload-time = "2026-03-01T07:48:46.098Z" }, +] + [[package]] name = "livekit" version = "1.0.25" @@ -1147,6 +1159,14 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" }, ] +[package.optional-dependencies] +linkify = [ + { name = "linkify-it-py" }, +] +plugins = [ + { name = "mdit-py-plugins" }, +] + [[package]] name = "markupsafe" version = "3.0.3" @@ -1210,6 +1230,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0e/72/e3cc540f351f316e9ed0f092757459afbc595824ca724cbc5a5d4263713f/markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287", size = 13973, upload-time = "2025-09-27T18:37:04.929Z" }, ] +[[package]] +name = "mdit-py-plugins" +version = "0.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b2/fd/a756d36c0bfba5f6e39a1cdbdbfdd448dc02692467d83816dff4592a1ebc/mdit_py_plugins-0.5.0.tar.gz", hash = "sha256:f4918cb50119f50446560513a8e311d574ff6aaed72606ddae6d35716fe809c6", size = 44655, upload-time = "2025-08-11T07:25:49.083Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/fb/86/dd6e5db36df29e76c7a7699123569a4a18c1623ce68d826ed96c62643cae/mdit_py_plugins-0.5.0-py3-none-any.whl", hash = "sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f", size = 57205, upload-time = "2025-08-11T07:25:47.597Z" }, +] + [[package]] name = "mdurl" version = "0.1.2" @@ -1629,6 +1661,11 @@ cli = [ { name = "rich" }, { name = "typer" }, ] +tui = [ + { name = "rich" }, + { name = "textual" }, + { name = "typer" }, +] [package.dev-dependencies] dev = [ @@ -1640,6 +1677,7 @@ dev = [ { name = "python-dotenv" }, { name = "rich" }, { name = "ruff" }, + { name = "textual" }, { name = "typer" }, ] @@ -1647,9 +1685,12 @@ dev = [ requires-dist = [ { name = "livekit-agents", extras = ["openai", "silero", "turn-detector"], specifier = "~=1.4" }, { name = "rich", marker = "extra == 'cli'", specifier = ">=13" }, + { name = "rich", marker = "extra == 'tui'", specifier = ">=13" }, + { name = "textual", marker = "extra == 'tui'", specifier = ">=0.47,<2" }, { name = "typer", marker = "extra == 'cli'", specifier = ">=0.12" }, + { name = "typer", marker = "extra == 'tui'", specifier = ">=0.12" }, ] -provides-extras = ["cli"] +provides-extras = ["cli", "tui"] [package.metadata.requires-dev] dev = [ @@ -1661,6 +1702,7 @@ dev = [ { name = "python-dotenv", specifier = ">=1.2.2" }, { name = "rich", specifier = ">=13" }, { name = "ruff", specifier = ">=0.15.6" }, + { name = "textual", specifier = ">=0.47,<2" }, { name = "typer", specifier = ">=0.12" }, ] @@ -2495,6 +2537,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" }, ] +[[package]] +name = "textual" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py", 
extra = ["linkify", "plugins"] }, + { name = "platformdirs" }, + { name = "rich" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1f/b6/59b1de04bb4dca0f21ed7ba0b19309ed7f3f5de4396edf20cc2855e53085/textual-1.0.0.tar.gz", hash = "sha256:bec9fe63547c1c552569d1b75d309038b7d456c03f86dfa3706ddb099b151399", size = 1532733, upload-time = "2024-12-12T10:42:03.286Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ac/bb/5fb6656c625019cd653d5215237d7cd6e0b12e7eae4195c3d1c91b2136fc/textual-1.0.0-py3-none-any.whl", hash = "sha256:2d4a701781c05104925e463ae370c630567c70c2880e92ab838052e3e23c986f", size = 660456, upload-time = "2024-12-12T10:42:00.375Z" }, +] + [[package]] name = "tokenizers" version = "0.22.2" @@ -2640,6 +2697,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, ] +[[package]] +name = "uc-micro-py" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/78/67/9a363818028526e2d4579334460df777115bdec1bb77c08f9db88f6389f2/uc_micro_py-2.0.0.tar.gz", hash = "sha256:c53691e495c8db60e16ffc4861a35469b0ba0821fe409a8a7a0a71864d33a811", size = 6611, upload-time = "2026-03-01T06:31:27.526Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/73/d21edf5b204d1467e06500080a50f79d49ef2b997c79123a536d4a17d97c/uc_micro_py-2.0.0-py3-none-any.whl", hash = "sha256:3603a3859af53e5a39bc7677713c78ea6589ff188d70f4fee165db88e22b242c", size = 6383, upload-time = "2026-03-01T06:31:26.257Z" }, +] + [[package]] name = "urllib3" version = "2.6.3"