From 815e4aef74b38bb4429dc6c3849d477774bab278 Mon Sep 17 00:00:00 2001 From: streamer45 Date: Fri, 16 Jan 2026 09:13:56 +0100 Subject: [PATCH] feat: implement built-in auth --- .github/workflows/e2e.yml | 4 + .gitignore | 3 + Cargo.lock | 32 + DOCKER.md | 42 +- README.md | 22 +- REUSE.toml | 1 + ROADMAP.md | 2 +- apps/skit/Cargo.toml | 15 +- apps/skit/src/auth/claims.rs | 224 ++++++ apps/skit/src/auth/cookie.rs | 125 ++++ apps/skit/src/auth/extractor.rs | 264 +++++++ apps/skit/src/auth/handlers.rs | 486 ++++++++++++ apps/skit/src/auth/mod.rs | 704 ++++++++++++++++++ apps/skit/src/auth/moq.rs | 277 +++++++ apps/skit/src/auth/stores/file.rs | 691 +++++++++++++++++ apps/skit/src/auth/stores/mod.rs | 191 +++++ apps/skit/src/cli.rs | 490 ++++++++++++ apps/skit/src/config.rs | 86 +++ apps/skit/src/lib.rs | 1 + apps/skit/src/main.rs | 1 + apps/skit/src/moq_gateway.rs | 9 +- apps/skit/src/permissions.rs | 63 +- apps/skit/src/server.rs | 628 ++++++++++++++-- apps/skit/src/state.rs | 2 + apps/skit/tests/auth_integration_test.rs | 207 +++++ apps/skit/tests/base_path_routing_test.rs | 2 +- apps/skit/tests/end_to_end_test.rs | 2 +- apps/skit/tests/http_origin_test.rs | 2 +- apps/skit/tests/http_sessions_test.rs | 2 +- apps/skit/tests/plugin_integration_test.rs | 2 +- apps/skit/tests/session_lifecycle_test.rs | 2 +- apps/skit/tests/websocket_origin_test.rs | 2 +- crates/core/src/moq_gateway.rs | 17 + crates/nodes/src/transport/moq/mod.rs | 48 ++ crates/nodes/src/transport/moq/peer.rs | 59 ++ crates/nodes/src/transport/moq/pull.rs | 28 +- crates/nodes/src/transport/moq/push.rs | 18 +- deploy/systemd/skit.toml | 8 +- docker-skit-demo.toml | 13 +- docker-skit-gpu.toml | 13 +- docker-skit.toml | 13 +- docs/astro.config.mjs | 1 + docs/src/content/docs/deployment/docker.md | 40 +- docs/src/content/docs/deployment/systemd.md | 8 + .../docs/getting-started/installation.md | 4 +- .../docs/getting-started/quick-start.md | 20 +- .../src/content/docs/guides/authentication.md | 142 ++++ 
docs/src/content/docs/guides/load-testing.md | 2 +- docs/src/content/docs/guides/security.md | 4 +- docs/src/content/docs/guides/web-ui.md | 8 + .../content/docs/guides/writing-plugins.md | 2 +- docs/src/content/docs/index.mdx | 2 +- .../docs/reference/configuration-generated.md | 177 ++++- .../content/docs/reference/configuration.md | 22 +- docs/src/content/docs/reference/http-api.md | 14 + .../nodes/transport-moq-publisher.md | 9 + .../nodes/transport-moq-subscriber.md | 9 + .../content/docs/reference/plugins/index.md | 3 - e2e/README.md | 31 + e2e/src/harness/run.ts | 80 +- e2e/tests/auth-helpers.ts | 116 +++ e2e/tests/auth.spec.ts | 55 ++ e2e/tests/design.spec.ts | 3 + e2e/tests/monitor.spec.ts | 16 +- justfile | 12 + .../loadtest/pipelines/moq_broadcaster.yml | 2 + .../pipelines/moq_mixing_selfcontained.yml | 6 + .../loadtest/pipelines/moq_selfcontained.yml | 6 + .../pipelines/moq_subscriber_transcode.yml | 4 + samples/skit.toml | 33 +- ui/src/App.tsx | 62 +- ui/src/Layout.tsx | 5 + ui/src/hooks/useSession.ts | 5 +- ui/src/hooks/useSessionsPrefetch.ts | 5 +- ui/src/services/assets.ts | 19 +- ui/src/services/auth.ts | 130 ++++ ui/src/services/base.ts | 50 +- ui/src/services/config.ts | 5 +- ui/src/services/converter.test.ts | 4 + ui/src/services/converter.ts | 8 +- ui/src/services/permissions.test.ts | 5 + ui/src/services/permissions.ts | 5 +- ui/src/services/plugins.ts | 8 +- ui/src/services/samples.ts | 31 +- ui/src/services/sessions.ts | 12 +- ui/src/services/websocket.ts | 19 +- ui/src/stores/pluginStore.ts | 8 +- ui/src/stores/schemaStore.ts | 12 +- ui/src/stores/streamStore.test.ts | 8 + ui/src/stores/streamStore.ts | 11 +- ui/src/views/LoginView.tsx | 245 ++++++ ui/src/views/MintedTokensTable.test.tsx | 124 +++ ui/src/views/MintedTokensTable.tsx | 436 +++++++++++ ui/src/views/StreamView.tsx | 16 + ui/src/views/TokensView.styles.ts | 364 +++++++++ ui/src/views/TokensView.tsx | 514 +++++++++++++ 96 files changed, 7496 insertions(+), 247 deletions(-) create 
mode 100644 apps/skit/src/auth/claims.rs create mode 100644 apps/skit/src/auth/cookie.rs create mode 100644 apps/skit/src/auth/extractor.rs create mode 100644 apps/skit/src/auth/handlers.rs create mode 100644 apps/skit/src/auth/mod.rs create mode 100644 apps/skit/src/auth/moq.rs create mode 100644 apps/skit/src/auth/stores/file.rs create mode 100644 apps/skit/src/auth/stores/mod.rs create mode 100644 apps/skit/tests/auth_integration_test.rs create mode 100644 docs/src/content/docs/guides/authentication.md create mode 100644 e2e/tests/auth-helpers.ts create mode 100644 e2e/tests/auth.spec.ts create mode 100644 ui/src/services/auth.ts create mode 100644 ui/src/views/LoginView.tsx create mode 100644 ui/src/views/MintedTokensTable.test.tsx create mode 100644 ui/src/views/MintedTokensTable.tsx create mode 100644 ui/src/views/TokensView.styles.ts create mode 100644 ui/src/views/TokensView.tsx diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 50b587b8..d32b65df 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -65,6 +65,10 @@ jobs: working-directory: ./e2e run: bun run test + - name: Run E2E tests (auth enabled) + working-directory: ./e2e + run: E2E_AUTH=1 bun run test + - name: Upload Playwright report uses: actions/upload-artifact@v4 if: failure() diff --git a/.gitignore b/.gitignore index 5d4126e2..1a75003f 100644 --- a/.gitignore +++ b/.gitignore @@ -50,6 +50,9 @@ samples/pipelines/user samples/audio/user/* !samples/audio/user/.gitkeep +# StreamKit runtime state (keys/tokens/config caches) +.streamkit + # Audio samples - only keep opus/ogg formats samples/audio/system/*.wav samples/audio/system/*.flac diff --git a/Cargo.lock b/Cargo.lock index 46b6dd0d..4307c87b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2228,6 +2228,21 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "jsonwebtoken" +version = "10.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c76e1c7d7df3e34443b3621b459b066a7b79644f059fc8b2db7070c825fd417e" +dependencies = [ + "aws-lc-rs", + "base64 0.22.1", + "getrandom 0.2.16", + "js-sys", + "serde", + "serde_json", + "signature", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -4276,6 +4291,15 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "simd-adler32" version = "0.3.8" @@ -4527,8 +4551,10 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "aws-lc-rs", "axum", "axum-server", + "base64 0.22.1", "bytes", "clap", "console-subscriber", @@ -4536,11 +4562,15 @@ dependencies = [ "figment", "futures", "futures-util", + "getrandom 0.3.4", "glob", + "hex", "http-body-util", "hyper", "jemalloc_pprof", + "jsonwebtoken", "mime_guess", + "moq-lite", "moq-native", "multer", "ogg", @@ -4557,6 +4587,7 @@ dependencies = [ "serde", "serde-saphyr", "serde_json", + "sha2", "streamkit-api", "streamkit-core", "streamkit-engine", @@ -4565,6 +4596,7 @@ dependencies = [ "streamkit-plugin-wasm", "sysinfo", "tempfile", + "thiserror 2.0.17", "tikv-jemallocator", "time", "tokio", diff --git a/DOCKER.md b/DOCKER.md index 33469d52..5cb3e971 100644 --- a/DOCKER.md +++ b/DOCKER.md @@ -26,12 +26,30 @@ This guide covers building and running StreamKit Docker images. The official “ ```bash docker build -f Dockerfile.demo -t streamkit:demo . -docker run \ +docker run --rm --name streamkit-demo \ -p 127.0.0.1:4545:4545/tcp \ -p 127.0.0.1:4545:4545/udp \ streamkit:demo ``` +> [!NOTE] +> The demo image binds to `0.0.0.0:4545` inside the container so published ports work. With `auth.mode=auto`, built-in auth is enabled by default. 
+> To log in, print the bootstrap admin token and paste it into `http://localhost:4545/login`: +> +> ```bash +> docker exec streamkit-demo skit auth print-admin-token --raw +> ``` + +> [!NOTE] +> Linux-only (no login): run with host networking and bind to loopback inside the container to keep auth disabled in `auth.mode=auto`: +> +> ```bash +> docker run --rm --name streamkit-demo \ +> --network host \ +> -e SK_SERVER__ADDRESS=127.0.0.1:4545 \ +> streamkit:demo +> ``` + If you want the OpenAI-powered sample pipelines, pass `OPENAI_API_KEY` without putting it directly in the command: ```bash @@ -106,6 +124,28 @@ docker run --rm -d --name streamkit \ # Note: the image defaults to `skit serve` (you can also pass it explicitly). +> [!CAUTION] +> StreamKit ships with built-in authentication (auto-enabled on non-loopback binds, including Docker’s `0.0.0.0`). +> If you see the login page, fetch the bootstrap admin token with: +> +> ```bash +> docker exec streamkit skit auth print-admin-token +> ``` +> +> The default token path inside the container is `/opt/streamkit/.streamkit/auth/admin.token`. +> Mount `/opt/streamkit/.streamkit` (or set `[auth].state_dir`) if you want the auth state persisted across restarts. +> +> Linux-only (frictionless demo): run with host networking and bind to loopback inside the container to keep auth disabled in `auth.mode=auto`: +> +> ```bash +> docker run --rm -d --name streamkit \ +> --network host \ +> -e SK_SERVER__ADDRESS=127.0.0.1:4545 \ +> -v $(pwd)/models:/opt/streamkit/models:ro \ +> -v $(pwd)/.plugins:/opt/streamkit/plugins:ro \ +> streamkit:latest +> ``` + # Open http://localhost:4545 in your browser # To stop: docker stop streamkit ``` diff --git a/README.md b/README.md index 276cac71..49e31e78 100644 --- a/README.md +++ b/README.md @@ -88,7 +88,7 @@ If you try it and something feels off, please open an issue (or a small PR). For The fastest way to get started is using pre-built Docker images from GitHub Container Registry (GHCR). 
The image serves the web UI and includes sample pipelines. > [!CAUTION] -> StreamKit does not currently implement authentication. Do not expose it directly to the public internet. Bind to localhost (recommended) or put it behind an authenticating reverse proxy and a trusted role header. See . +> StreamKit ships with built-in authentication (auto-enabled on non-loopback binds). If you see the login page, run `skit auth print-admin-token` and paste the token; admins can mint additional tokens in **Admin → Access Tokens**. Do not disable auth when exposing it beyond localhost; see and . > [!NOTE] > Official Docker images are published for `linux/amd64` (x86_64). On ARM hosts (Raspberry Pi, Apple Silicon, etc.), use “Build from Source” or run with amd64 emulation. @@ -111,12 +111,30 @@ docker run --rm \ The `:latest-demo` image bundles core plugins plus the models needed by the shipped sample pipelines (much larger image; intended for demos/evaluation, not production). ```bash -docker run --rm \ +docker run --rm --name streamkit-demo \ -p 127.0.0.1:4545:4545/tcp \ -p 127.0.0.1:4545:4545/udp \ ghcr.io/streamer45/streamkit:latest-demo ``` +> [!NOTE] +> In Docker, StreamKit binds to `0.0.0.0` inside the container so published ports work. With `auth.mode=auto`, this means built-in auth is enabled by default. 
+> To log in, print the bootstrap admin token and paste it into `http://localhost:4545/login`: +> +> ```bash +> docker exec streamkit-demo skit auth print-admin-token --raw +> ``` + +> [!NOTE] +> Linux-only (no login): run with host networking and bind to loopback inside the container to keep auth disabled in `auth.mode=auto`: +> +> ```bash +> docker run --rm --name streamkit-demo \ +> --network host \ +> -e SK_SERVER__ADDRESS=127.0.0.1:4545 \ +> ghcr.io/streamer45/streamkit:latest-demo +> ``` + If you want the OpenAI-powered sample pipelines, pass `OPENAI_API_KEY` without putting it directly in the command: ```bash diff --git a/REUSE.toml b/REUSE.toml index 503abc3c..081d66ca 100644 --- a/REUSE.toml +++ b/REUSE.toml @@ -86,6 +86,7 @@ SPDX-License-Identifier = "MPL-2.0" [[annotations]] path = [ "target/**", + ".streamkit/**", "node_modules/**", "dist/**", "build/**", diff --git a/ROADMAP.md b/ROADMAP.md index 1d49fdc9..fc41d3ee 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -38,7 +38,7 @@ These are in place today and will be iterated on (not “added from scratch”): - **Playwright E2E** + CI workflow (expand coverage over time) - **Load testing runner + presets** (curate canonical scenarios + track budgets) - **Observability baseline** (logs + OTLP metrics/traces + profiling helpers) -- **RBAC permissions model** (roles + allowlists), even though authentication is not yet implemented +- **RBAC + built-in auth foundation** (roles/allowlists + JWT auth), with secure-by-default behavior on non-loopback binds ## Near-Term (v0.1 → v0.5) diff --git a/apps/skit/Cargo.toml b/apps/skit/Cargo.toml index a0831267..300a00ad 100644 --- a/apps/skit/Cargo.toml +++ b/apps/skit/Cargo.toml @@ -54,6 +54,7 @@ tokio-stream = "0.1" hyper = { version = "1.8", features = ["full"] } axum-server = { version = "0.8", features = ["tls-rustls"] } rustls = { version = "0.23", features = ["ring"] } +reqwest = { version = "0.12", features = ["multipart", "json"] } bytes = { workspace = true } futures 
= { workspace = true } uuid = { version = "1.19", features = ["v4", "serde"] } @@ -113,6 +114,17 @@ async-trait = { workspace = true } # For glob pattern matching in permissions glob = "0.3" +# For built-in authentication +jsonwebtoken = { version = "10.2.0", default-features = false, features = ["aws_lc_rs"] } +sha2 = "0.10" +hex = "0.4" +base64 = "0.22" +thiserror = "2.0" +getrandom = "0.3" +aws-lc-rs = "1" + +# For MoQ auth path matching (optional, with moq feature) +moq-lite = { version = "0.10", optional = true } [features] default = ["script"] @@ -121,12 +133,11 @@ profiling = ["dep:pprof", "dep:tikv-jemallocator", "dep:jemalloc_pprof"] # DHAT allocation profiling - tracks allocation counts/rates (mutually exclusive with profiling) # Use this to find hot allocation sites. Output is written on graceful shutdown. dhat-heap = ["dep:dhat"] -moq = ["dep:moq-native"] +moq = ["dep:moq-native", "dep:moq-lite"] script = ["streamkit-nodes/script", "streamkit-engine/script"] [dev-dependencies] tokio-test = "0.4" -reqwest = { version = "0.12", features = ["multipart", "json"] } tokio-tungstenite = "0.28" futures-util = "0.3" ogg = "0.9.2" diff --git a/apps/skit/src/auth/claims.rs b/apps/skit/src/auth/claims.rs new file mode 100644 index 00000000..04a8f84a --- /dev/null +++ b/apps/skit/src/auth/claims.rs @@ -0,0 +1,224 @@ +// SPDX-FileCopyrightText: © 2025 StreamKit Contributors +// +// SPDX-License-Identifier: MPL-2.0 + +//! JWT claim structures for StreamKit authentication. +//! +//! StreamKit uses two types of JWTs: +//! - **API tokens** (`aud: "skit-api"`): For HTTP API and WebSocket control plane +//! - **MoQ tokens** (`aud: "skit-moq"`): For MoQ/WebTransport connections +//! +//! Both token types require `jti` (JWT ID) for revocation support. + +use serde::{Deserialize, Serialize}; + +/// Audience value for API tokens. +pub const AUD_API: &str = "skit-api"; + +/// Audience value for MoQ tokens. 
+#[allow(dead_code)] +pub const AUD_MOQ: &str = "skit-moq"; + +/// JWT claims for API tokens (HTTP API and WebSocket control plane). +/// +/// These tokens are used for: +/// - HTTP API authentication (via Authorization header or cookie) +/// - WebSocket control plane connections +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ApiClaims { + /// Audience - must be "skit-api" + pub aud: String, + + /// Subject - identifier for the token holder + pub sub: String, + + /// Role name (e.g., "admin", "user", "viewer") + pub role: String, + + /// Issued at (Unix timestamp in seconds) + pub iat: u64, + + /// Expiration (Unix timestamp in seconds) - REQUIRED + pub exp: u64, + + /// JWT ID - REQUIRED for revocation + pub jti: String, +} + +impl ApiClaims { + /// Create anonymous claims for when auth is disabled. + /// + /// This is used to maintain consistent AuthContext structure + /// even when authentication is not required. + pub fn anonymous(role: &str) -> Self { + Self { + aud: AUD_API.to_string(), + sub: "anonymous".to_string(), + role: role.to_string(), + iat: 0, + exp: u64::MAX, + jti: "anonymous".to_string(), + } + } + + /// Validate the claims structure (not cryptographic validation). + /// + /// # Errors + /// + /// Returns validation errors for invalid audience, missing jti, or missing role. + pub fn validate(&self) -> Result<(), ClaimsValidationError> { + if self.aud != AUD_API { + return Err(ClaimsValidationError::InvalidAudience { + expected: AUD_API.to_string(), + actual: self.aud.clone(), + }); + } + if self.jti.is_empty() { + return Err(ClaimsValidationError::MissingJti); + } + if self.role.is_empty() { + return Err(ClaimsValidationError::MissingRole); + } + Ok(()) + } +} + +/// JWT claims for MoQ tokens (WebTransport connections). +/// +/// Compatible with moq-token format. The `subscribe` and `publish` fields +/// are **broadcast path prefixes**, not URL paths. 
+/// +/// Path semantics: +/// - `[""]` (empty string in array) = all broadcasts allowed +/// - `[]` (empty array) = no broadcasts allowed +/// - `["foo", "bar"]` = broadcasts starting with "foo" or "bar" allowed +#[derive(Debug, Clone, Serialize, Deserialize)] +#[allow(dead_code)] +pub struct MoqClaims { + /// Audience - must be "skit-moq" + pub aud: String, + + /// URL path prefix that this token is valid for (e.g., "/moq/session1") + pub root: String, + + /// Allowed broadcast prefixes for subscribing (JWT claim: `get`). + /// Empty string means "all", empty array means "none". + #[serde(default, rename = "get", alias = "subscribe")] + pub subscribe: Vec, + + /// Allowed broadcast prefixes for publishing (JWT claim: `put`). + /// Empty string means "all", empty array means "none". + #[serde(default, rename = "put", alias = "publish")] + pub publish: Vec, + + /// Issued at (Unix timestamp in seconds) + pub iat: u64, + + /// Expiration (Unix timestamp in seconds) - REQUIRED + pub exp: u64, + + /// JWT ID - REQUIRED for revocation + pub jti: String, +} + +#[allow(dead_code)] +impl MoqClaims { + /// Validate the claims structure (not cryptographic validation). + /// + /// # Errors + /// + /// Returns validation errors for invalid audience, missing jti, or missing root. + pub fn validate(&self) -> Result<(), ClaimsValidationError> { + if self.aud != AUD_MOQ { + return Err(ClaimsValidationError::InvalidAudience { + expected: AUD_MOQ.to_string(), + actual: self.aud.clone(), + }); + } + if self.jti.is_empty() { + return Err(ClaimsValidationError::MissingJti); + } + if self.root.is_empty() { + return Err(ClaimsValidationError::MissingRoot); + } + Ok(()) + } +} + +/// Errors that can occur during claims validation. 
+#[derive(Debug, thiserror::Error)] +pub enum ClaimsValidationError { + #[error("Invalid audience: expected {expected}, got {actual}")] + InvalidAudience { expected: String, actual: String }, + + #[error("Missing jti claim (required for revocation)")] + MissingJti, + + #[error("Missing role claim")] + MissingRole, + + #[error("Missing root claim")] + #[allow(dead_code)] + MissingRoot, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_api_claims_validation() { + let valid = ApiClaims { + aud: AUD_API.to_string(), + sub: "user123".to_string(), + role: "admin".to_string(), + iat: 1000, + exp: 2000, + jti: "abc-123".to_string(), + }; + assert!(valid.validate().is_ok()); + + // Wrong audience + let wrong_aud = ApiClaims { aud: AUD_MOQ.to_string(), ..valid.clone() }; + assert!(matches!(wrong_aud.validate(), Err(ClaimsValidationError::InvalidAudience { .. }))); + + // Missing jti + let no_jti = ApiClaims { jti: String::new(), ..valid.clone() }; + assert!(matches!(no_jti.validate(), Err(ClaimsValidationError::MissingJti))); + + // Missing role + let no_role = ApiClaims { role: String::new(), ..valid }; + assert!(matches!(no_role.validate(), Err(ClaimsValidationError::MissingRole))); + } + + #[test] + fn test_moq_claims_validation() { + let valid = MoqClaims { + aud: AUD_MOQ.to_string(), + root: "/moq/session1".to_string(), + subscribe: vec![String::new()], // Allow all + publish: vec![String::new()], // Allow all + iat: 1000, + exp: 2000, + jti: "moq-123".to_string(), + }; + assert!(valid.validate().is_ok()); + + // Wrong audience + let wrong_aud = MoqClaims { aud: AUD_API.to_string(), ..valid.clone() }; + assert!(matches!(wrong_aud.validate(), Err(ClaimsValidationError::InvalidAudience { .. 
}))); + + // Missing root + let no_root = MoqClaims { root: String::new(), ..valid }; + assert!(matches!(no_root.validate(), Err(ClaimsValidationError::MissingRoot))); + } + + #[test] + fn test_anonymous_claims() { + let anon = ApiClaims::anonymous("viewer"); + assert_eq!(anon.aud, AUD_API); + assert_eq!(anon.sub, "anonymous"); + assert_eq!(anon.role, "viewer"); + assert_eq!(anon.jti, "anonymous"); + } +} diff --git a/apps/skit/src/auth/cookie.rs b/apps/skit/src/auth/cookie.rs new file mode 100644 index 00000000..7b14d32d --- /dev/null +++ b/apps/skit/src/auth/cookie.rs @@ -0,0 +1,125 @@ +// SPDX-FileCopyrightText: © 2025 StreamKit Contributors +// +// SPDX-License-Identifier: MPL-2.0 + +//! Cookie building helpers for session management. +//! +//! Cookies are used for browser-based authentication. The session cookie +//! is HttpOnly and SameSite=Strict for security. + +use crate::config::Config; +use axum::http::HeaderValue; + +fn normalize_cookie_path(base_path: Option<&str>) -> String { + let path = base_path.unwrap_or("/").trim(); + if path.is_empty() || path == "/" { + return "/".to_string(); + } + + let path = path.trim_end_matches('/'); + if path.is_empty() || path == "/" { + return "/".to_string(); + } + + if path.starts_with('/') { + path.to_string() + } else { + format!("/{path}") + } +} + +/// Build a session cookie header value for login. 
+/// +/// The cookie is configured with: +/// - HttpOnly: Prevents JavaScript access (XSS protection) +/// - SameSite=Strict: Prevents CSRF attacks +/// - Secure: Only sent over HTTPS (if TLS is enabled) +/// - Path: Set to base_path for subpath deployment safety +pub fn build_session_cookie( + token: &str, + config: &Config, + max_age_secs: u64, +) -> Option { + let cookie_name = &config.auth.cookie_name; + let secure = config.server.tls; + + // Path = base_path for subpath safety (or "/" if not set) + let path = normalize_cookie_path(config.server.base_path.as_deref()); + + let cookie = format!( + "{cookie_name}={token}; HttpOnly; SameSite=Strict; Path={path}{}; Max-Age={max_age_secs}", + if secure { "; Secure" } else { "" }, + ); + + HeaderValue::from_str(&cookie).ok() +} + +/// Build a logout cookie header value that clears the session. +/// +/// Sets Max-Age=0 to immediately expire the cookie. +pub fn build_logout_cookie(config: &Config) -> Option { + let cookie_name = &config.auth.cookie_name; + let path = normalize_cookie_path(config.server.base_path.as_deref()); + let secure = config.server.tls; + + let cookie = format!( + "{cookie_name}=; HttpOnly; SameSite=Strict; Path={path}{}; Max-Age=0", + if secure { "; Secure" } else { "" }, + ); + + HeaderValue::from_str(&cookie).ok() +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + + #[test] + fn test_session_cookie_basic() { + let config = Config::default(); + let cookie = build_session_cookie("test-token", &config, 3600).unwrap(); + let cookie_str = cookie.to_str().unwrap(); + + assert!(cookie_str.contains("skit_session=test-token")); + assert!(cookie_str.contains("HttpOnly")); + assert!(cookie_str.contains("SameSite=Strict")); + assert!(cookie_str.contains("Path=/")); + assert!(cookie_str.contains("Max-Age=3600")); + // No Secure flag when TLS is not configured + assert!(!cookie_str.contains("Secure")); + } + + #[test] + fn test_session_cookie_with_base_path() { + let mut config = 
Config::default(); + config.server.base_path = Some("/api/v1".to_string()); + + let cookie = build_session_cookie("test-token", &config, 3600).unwrap(); + let cookie_str = cookie.to_str().unwrap(); + + assert!(cookie_str.contains("Path=/api/v1")); + } + + #[test] + fn test_logout_cookie() { + let config = Config::default(); + let cookie = build_logout_cookie(&config).unwrap(); + let cookie_str = cookie.to_str().unwrap(); + + assert!(cookie_str.contains("skit_session=")); + assert!(cookie_str.contains("Max-Age=0")); + assert!(cookie_str.contains("HttpOnly")); + assert!(!cookie_str.contains("Secure")); + } + + #[test] + fn test_logout_cookie_secure() { + let mut config = Config::default(); + config.server.tls = true; + let cookie = build_logout_cookie(&config).unwrap(); + let cookie_str = cookie.to_str().unwrap(); + + assert!(cookie_str.contains("Secure")); + } +} diff --git a/apps/skit/src/auth/extractor.rs b/apps/skit/src/auth/extractor.rs new file mode 100644 index 00000000..9207db75 --- /dev/null +++ b/apps/skit/src/auth/extractor.rs @@ -0,0 +1,264 @@ +// SPDX-FileCopyrightText: © 2025 StreamKit Contributors +// +// SPDX-License-Identifier: MPL-2.0 + +//! Authentication context and token extraction utilities. +//! +//! This module provides utilities for extracting and validating JWT tokens +//! from HTTP requests. The main types are: +//! +//! - `AuthContext`: Represents an authenticated request with validated claims +//! - `MaybeAuth`: Optional authentication that never fails +//! +//! These can be used by handlers to check authentication status. + +use super::{ApiClaims, AuthState}; +use crate::config::Config; +use crate::permissions::Permissions; +use axum::http::header::{AUTHORIZATION, COOKIE}; +use axum::http::{HeaderMap, StatusCode}; + +/// Authenticated request context. +/// +/// Contains the validated JWT claims, role name, and resolved permissions. 
+#[derive(Debug, Clone)] +pub struct AuthContext { + /// The validated JWT claims + pub claims: ApiClaims, + /// The role name from the token + pub role: String, + /// The permissions associated with this role + #[allow(dead_code)] + pub permissions: Permissions, +} + +#[allow(dead_code)] +impl AuthContext { + /// Get the JWT ID (for revocation tracking) + pub fn jti(&self) -> &str { + &self.claims.jti + } + + /// Get the subject (token holder identifier) + pub fn sub(&self) -> &str { + &self.claims.sub + } +} + +/// Optional authenticated request context. +/// +/// This type never fails - when authentication is not provided or fails, +/// it contains `None`. +#[derive(Debug, Clone)] +#[allow(dead_code)] +pub struct MaybeAuth(pub Option); + +#[allow(dead_code)] +impl MaybeAuth { + /// Get the auth context if present + pub const fn context(&self) -> Option<&AuthContext> { + self.0.as_ref() + } + + /// Check if the request is authenticated + pub const fn is_authenticated(&self) -> bool { + self.0.is_some() + } + + /// Get a reference to the inner Option + #[allow(clippy::ref_option)] + pub const fn as_option(&self) -> &Option { + &self.0 + } + + /// Unwrap or return an unauthorized error. + /// + /// # Errors + /// + /// Returns `(StatusCode::UNAUTHORIZED, ...)` if not authenticated. + pub fn require(self) -> Result { + self.0.ok_or_else(|| (StatusCode::UNAUTHORIZED, "Authentication required".to_string())) + } +} + +/// Extract token from Authorization header or cookie. +/// +/// Checks the Authorization header first (Bearer token format), +/// then falls back to the configured cookie name. 
+pub fn extract_token(headers: &HeaderMap, config: &Config) -> Option { + // Try Authorization header first: "Bearer " + if let Some(auth_header) = headers.get(AUTHORIZATION) { + if let Ok(auth_str) = auth_header.to_str() { + if let Some(token) = auth_str.strip_prefix("Bearer ") { + return Some(token.to_string()); + } + } + } + + // Fall back to cookie + if let Some(cookie_header) = headers.get(COOKIE) { + if let Ok(cookie_str) = cookie_header.to_str() { + let cookie_name = &config.auth.cookie_name; + for cookie in cookie_str.split(';') { + let cookie = cookie.trim(); + if let Some(value) = cookie.strip_prefix(&format!("{cookie_name}=")) { + return Some(value.to_string()); + } + } + } + } + + None +} + +/// Validate a token and return the AuthContext. +/// +/// This function performs: +/// 1. Token extraction from headers +/// 2. JWT signature verification +/// 3. Revocation check +/// 4. "Tokens we mint" enforcement +/// 5. Permission resolution +/// +/// # Errors +/// +/// Returns `(StatusCode, String)` on authentication failure. +pub async fn validate_token_from_headers( + headers: &HeaderMap, + auth_state: &AuthState, + config: &Config, + permissions_config: &crate::permissions::PermissionsConfig, +) -> Result { + // Extract token + let token = extract_token(headers, config).ok_or_else(|| { + (StatusCode::UNAUTHORIZED, "No authentication token provided".to_string()) + })?; + + validate_token(&token, auth_state, permissions_config).await +} + +/// Validate a raw token string and return the AuthContext. +/// +/// # Errors +/// +/// Returns `(StatusCode, String)` on authentication failure. 
+pub async fn validate_token( + token: &str, + auth_state: &AuthState, + permissions_config: &crate::permissions::PermissionsConfig, +) -> Result { + // Validate JWT + let claims = auth_state + .validate_api_token(token) + .map_err(|e| (StatusCode::UNAUTHORIZED, format!("Invalid token: {e}")))?; + + let token_hash = super::hash_token(token); + + // Check revocation + if let Some(revocation_store) = auth_state.revocation_store() { + if revocation_store.is_revoked(&token_hash) { + return Err((StatusCode::UNAUTHORIZED, "Token has been revoked".to_string())); + } + } + + // Check "tokens we mint" enforcement + if let Some(metadata_store) = auth_state.token_metadata_store() { + if !metadata_store.exists(&claims.jti).await { + return Err(( + StatusCode::UNAUTHORIZED, + "Token not recognized (not minted by this server)".to_string(), + )); + } + } + + if !permissions_config.roles.contains_key(&claims.role) { + return Err((StatusCode::UNAUTHORIZED, "Token has unknown role".to_string())); + } + + // Get permissions for the role + let permissions = permissions_config.get_role(&claims.role); + + Ok(AuthContext { role: claims.role.clone(), claims, permissions }) +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + use axum::http::header::HeaderValue; + + fn make_headers(auth: Option<&str>, cookie: Option<&str>) -> HeaderMap { + let mut headers = HeaderMap::new(); + if let Some(auth) = auth { + headers.insert(AUTHORIZATION, HeaderValue::from_str(auth).unwrap()); + } + if let Some(cookie) = cookie { + headers.insert(COOKIE, HeaderValue::from_str(cookie).unwrap()); + } + headers + } + + #[test] + fn test_extract_bearer_token() { + let config = Config::default(); + let headers = make_headers(Some("Bearer my-token-123"), None); + + let token = extract_token(&headers, &config); + assert_eq!(token, Some("my-token-123".to_string())); + } + + #[test] + fn test_extract_cookie_token() { + let mut config = Config::default(); + config.auth.cookie_name = 
"skit_session".to_string(); + + let headers = + make_headers(None, Some("other=value; skit_session=cookie-token-456; another=x")); + + let token = extract_token(&headers, &config); + assert_eq!(token, Some("cookie-token-456".to_string())); + } + + #[test] + fn test_bearer_takes_precedence() { + let mut config = Config::default(); + config.auth.cookie_name = "skit_session".to_string(); + + let headers = make_headers(Some("Bearer bearer-token"), Some("skit_session=cookie-token")); + + let token = extract_token(&headers, &config); + assert_eq!(token, Some("bearer-token".to_string())); + } + + #[test] + fn test_no_token() { + let config = Config::default(); + let headers = make_headers(None, None); + + let token = extract_token(&headers, &config); + assert!(token.is_none()); + } + + #[test] + fn test_invalid_auth_header_format() { + let config = Config::default(); + let headers = make_headers(Some("Basic dXNlcjpwYXNz"), None); + + let token = extract_token(&headers, &config); + assert!(token.is_none()); + } + + #[test] + fn test_maybe_auth_require() { + let auth = MaybeAuth(None); + assert!(auth.require().is_err()); + + let ctx = AuthContext { + claims: ApiClaims::anonymous("admin"), + role: "admin".to_string(), + permissions: crate::permissions::Permissions::admin(), + }; + let auth = MaybeAuth(Some(ctx)); + assert!(auth.require().is_ok()); + } +} diff --git a/apps/skit/src/auth/handlers.rs b/apps/skit/src/auth/handlers.rs new file mode 100644 index 00000000..c90f8112 --- /dev/null +++ b/apps/skit/src/auth/handlers.rs @@ -0,0 +1,486 @@ +// SPDX-FileCopyrightText: © 2025 StreamKit Contributors +// +// SPDX-License-Identifier: MPL-2.0 + +//! HTTP handlers for authentication endpoints. +//! +//! These handlers provide: +//! - `/api/v1/auth/login` - Verify token and set session cookie +//! - `/api/v1/auth/logout` - Clear session cookie +//! - `/api/v1/auth/me` - Get current auth status and role +//! - `/api/v1/auth/tokens` - List/create/revoke API tokens (admin only) +//! 
- `/api/v1/auth/moq-tokens` - Create MoQ tokens (admin only) + +use crate::auth::{ + build_logout_cookie, build_session_cookie, validate_token, validate_token_from_headers, + AuthContext, +}; +use crate::state::AppState; +use axum::extract::{DefaultBodyLimit, Path, State}; +use axum::http::header::AUTHORIZATION; +use axum::http::{header::SET_COOKIE, HeaderMap, StatusCode}; +use axum::response::IntoResponse; +use axum::Json; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; +use tower::limit::ConcurrencyLimitLayer; + +const AUTH_MAX_BODY_BYTES: usize = 64 * 1024; +const AUTH_MAX_CONCURRENCY: usize = 64; + +/// Request body for login endpoint. +#[derive(Debug, Deserialize)] +pub struct LoginRequest { + /// The API token to validate and set as session cookie + pub token: String, +} + +/// Response for /me endpoint. +#[derive(Debug, Serialize)] +pub struct MeResponse { + pub authenticated: bool, + pub auth_enabled: bool, + pub role: Option, + pub jti: Option, +} + +/// Request body for creating an API token. +#[derive(Debug, Deserialize, Serialize)] +pub struct CreateApiTokenRequest { + pub role: String, + #[serde(default)] + pub label: Option, + /// TTL in seconds (uses default if not specified) + #[serde(default)] + pub ttl_secs: Option, +} + +/// Request body for creating a MoQ token. +#[derive(Debug, Deserialize, Serialize)] +#[allow(dead_code)] +pub struct CreateMoqTokenRequest { + pub root: String, + #[serde(default)] + pub subscribe: Vec, + #[serde(default)] + pub publish: Vec, + #[serde(default)] + pub label: Option, + /// TTL in seconds (uses default if not specified) + #[serde(default)] + pub ttl_secs: Option, +} + +/// Response for token creation. +#[derive(Debug, Serialize, Deserialize)] +pub struct CreateTokenResponse { + pub token: String, + pub jti: String, + pub exp: u64, + #[serde(skip_serializing_if = "Option::is_none")] + pub url_template: Option, +} + +/// Token info for listing. 
+#[derive(Debug, Serialize)] +pub struct TokenInfo { + pub jti: String, + pub token_type: String, + pub role: Option, + pub label: Option, + pub created_at: u64, + pub exp: u64, + pub revoked: bool, + pub created_by: String, +} + +/// Helper to get auth context from headers, returning appropriate errors. +async fn get_auth_context( + headers: &HeaderMap, + app_state: &AppState, +) -> Result { + if !app_state.auth.is_enabled() { + // Auth disabled - use legacy role extraction + let (role, permissions) = + crate::role_extractor::get_role_and_permissions(headers, &Arc::new(app_state.clone())); + return Ok(AuthContext { + claims: crate::auth::ApiClaims::anonymous(&role), + role, + permissions, + }); + } + + validate_token_from_headers( + headers, + &app_state.auth, + &app_state.config, + &app_state.config.permissions, + ) + .await +} + +/// Helper to require admin role. +fn require_admin(auth: &AuthContext) -> Result<(), (StatusCode, String)> { + if auth.role != "admin" { + return Err((StatusCode::FORBIDDEN, "Admin role required".to_string())); + } + Ok(()) +} + +/// POST /api/v1/auth/login +/// +/// Validates a token and sets it as a session cookie. 
+pub async fn login_handler( + State(app_state): State>, + headers: HeaderMap, + payload: Option>, +) -> impl IntoResponse { + if !app_state.auth.is_enabled() { + return (StatusCode::BAD_REQUEST, "Authentication is disabled".to_string()).into_response(); + } + + let token_from_header = headers + .get(AUTHORIZATION) + .and_then(|h| h.to_str().ok()) + .and_then(|auth| auth.strip_prefix("Bearer ").map(str::to_string)); + let token_from_body = payload.map(|Json(req)| req.token); + let token = match token_from_header.or(token_from_body) { + Some(token) if !token.trim().is_empty() => token, + _ => return (StatusCode::BAD_REQUEST, "Missing token".to_string()).into_response(), + }; + + // Validate the token + let auth_ctx = + match validate_token(&token, &app_state.auth, &app_state.config.permissions).await { + Ok(ctx) => ctx, + Err((status, msg)) => return (status, msg).into_response(), + }; + + let now = match crate::auth::now_secs() { + Ok(now) => now, + Err(e) => return (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(), + }; + + // Build session cookie + let Some(cookie) = + build_session_cookie(&token, &app_state.config, auth_ctx.claims.exp.saturating_sub(now)) + else { + return (StatusCode::INTERNAL_SERVER_ERROR, "Failed to build cookie".to_string()) + .into_response(); + }; + + (StatusCode::NO_CONTENT, [(SET_COOKIE, cookie)]).into_response() +} + +/// POST /api/v1/auth/logout +/// +/// Clears the session cookie. +pub async fn logout_handler(State(app_state): State>) -> impl IntoResponse { + let Some(cookie) = build_logout_cookie(&app_state.config) else { + return (StatusCode::INTERNAL_SERVER_ERROR, "Failed to build cookie".to_string()) + .into_response(); + }; + + (StatusCode::NO_CONTENT, [(SET_COOKIE, cookie)]).into_response() +} + +/// GET /api/v1/auth/me +/// +/// Returns current authentication status. 
+pub async fn me_handler( + State(app_state): State>, + headers: HeaderMap, +) -> impl IntoResponse { + let auth_enabled = app_state.auth.is_enabled(); + + if !auth_enabled { + // Auth disabled - return default role info + let (role, _) = crate::role_extractor::get_role_and_permissions(&headers, &app_state); + return Json(MeResponse { + authenticated: true, // Consider everyone authenticated when auth is disabled + auth_enabled: false, + role: Some(role), + jti: None, + }); + } + + // Try to get auth context + match get_auth_context(&headers, &app_state).await { + Ok(auth_ctx) => Json(MeResponse { + authenticated: true, + auth_enabled: true, + role: Some(auth_ctx.role), + jti: Some(auth_ctx.claims.jti), + }), + Err(_) => { + Json(MeResponse { authenticated: false, auth_enabled: true, role: None, jti: None }) + }, + } +} + +/// POST /api/v1/auth/tokens +/// +/// Create a new API token (admin only). +pub async fn create_token_handler( + State(app_state): State>, + headers: HeaderMap, + Json(req): Json, +) -> impl IntoResponse { + if !app_state.auth.is_enabled() { + return (StatusCode::BAD_REQUEST, "Authentication is disabled".to_string()).into_response(); + } + + // Require auth + let auth_ctx = match get_auth_context(&headers, &app_state).await { + Ok(ctx) => ctx, + Err(e) => return e.into_response(), + }; + + // Require admin + if let Err(e) = require_admin(&auth_ctx) { + return e.into_response(); + } + + if !app_state.config.permissions.roles.contains_key(&req.role) { + return (StatusCode::BAD_REQUEST, "Unknown role".to_string()).into_response(); + } + + // Determine TTL + let ttl = req.ttl_secs.unwrap_or(app_state.config.auth.api_default_ttl_secs); + + // Mint the token + let (token, meta) = match app_state + .auth + .mint_api_token(&req.role, req.label.as_deref(), ttl, &auth_ctx.claims.jti) + .await + { + Ok(result) => result, + Err(e) => { + return (StatusCode::BAD_REQUEST, format!("Failed to create token: {e}")) + .into_response() + }, + }; + + 
Json(CreateTokenResponse { token, jti: meta.jti, exp: meta.exp, url_template: None }) + .into_response() +} + +/// GET /api/v1/auth/tokens +/// +/// List all minted tokens (admin only). +pub async fn list_tokens_handler( + State(app_state): State>, + headers: HeaderMap, +) -> impl IntoResponse { + if !app_state.auth.is_enabled() { + return (StatusCode::BAD_REQUEST, "Authentication is disabled".to_string()).into_response(); + } + + // Require auth + let auth_ctx = match get_auth_context(&headers, &app_state).await { + Ok(ctx) => ctx, + Err(e) => return e.into_response(), + }; + + // Require admin + if let Err(e) = require_admin(&auth_ctx) { + return e.into_response(); + } + + // Get token metadata store + let Some(store) = app_state.auth.token_metadata_store() else { + return ( + StatusCode::INTERNAL_SERVER_ERROR, + "Token metadata store not available".to_string(), + ) + .into_response(); + }; + + // List tokens + let tokens = match store.list().await { + Ok(t) => t, + Err(e) => { + return (StatusCode::INTERNAL_SERVER_ERROR, format!("Failed to list tokens: {e}")) + .into_response() + }, + }; + + // Convert to response format + let token_infos: Vec = tokens + .into_iter() + .map(|t| TokenInfo { + jti: t.jti, + token_type: t.token_type.to_string(), + role: t.role, + label: t.label, + created_at: t.created_at, + exp: t.exp, + revoked: t.revoked, + created_by: t.created_by, + }) + .collect(); + + Json(token_infos).into_response() +} + +/// DELETE /api/v1/auth/tokens/:jti +/// +/// Revoke a token by its jti (admin only). 
+pub async fn revoke_token_handler( + State(app_state): State>, + headers: HeaderMap, + Path(jti): Path, +) -> impl IntoResponse { + if !app_state.auth.is_enabled() { + return (StatusCode::BAD_REQUEST, "Authentication is disabled".to_string()).into_response(); + } + + // Require auth + let auth_ctx = match get_auth_context(&headers, &app_state).await { + Ok(ctx) => ctx, + Err(e) => return e.into_response(), + }; + + // Require admin + if let Err(e) = require_admin(&auth_ctx) { + return e.into_response(); + } + + // Prevent revoking own token + if auth_ctx.claims.jti == jti { + return (StatusCode::BAD_REQUEST, "Cannot revoke your own token".to_string()) + .into_response(); + } + + // Revoke the token + match app_state.auth.revoke_token(&jti).await { + Ok(()) => {}, + Err(crate::auth::AuthError::UnknownToken) => { + return (StatusCode::NOT_FOUND, "Token not found".to_string()).into_response(); + }, + Err(e) => { + return (StatusCode::INTERNAL_SERVER_ERROR, format!("Failed to revoke token: {e}")) + .into_response(); + }, + } + + (StatusCode::OK, "Token revoked").into_response() +} + +/// POST /api/v1/auth/moq-tokens +/// +/// Create a new MoQ token (admin only). 
+#[cfg(feature = "moq")] +pub async fn create_moq_token_handler( + State(app_state): State>, + headers: HeaderMap, + Json(req): Json, +) -> impl IntoResponse { + if !app_state.auth.is_enabled() { + return (StatusCode::BAD_REQUEST, "Authentication is disabled".to_string()).into_response(); + } + + // Require auth + let auth_ctx = match get_auth_context(&headers, &app_state).await { + Ok(ctx) => ctx, + Err(e) => return e.into_response(), + }; + + // Require admin + if let Err(e) = require_admin(&auth_ctx) { + return e.into_response(); + } + + // Determine TTL + let ttl = req.ttl_secs.unwrap_or(app_state.config.auth.moq_default_ttl_secs); + + // Mint the token + let (token, meta) = match app_state + .auth + .mint_moq_token( + &req.root, + req.subscribe, + req.publish, + req.label.as_deref(), + ttl, + &auth_ctx.claims.jti, + ) + .await + { + Ok(result) => result, + Err(e) => { + return (StatusCode::BAD_REQUEST, format!("Failed to create MoQ token: {e}")) + .into_response() + }, + }; + + let root_path = + if req.root.starts_with('/') { req.root.clone() } else { format!("/{}", req.root) }; + + // Prefer a full URL when the server is configured with a MoQ gateway URL (this matches what the + // Stream view expects). Otherwise fall back to a relative path. 
+ let url_template = app_state + .config + .server + .moq_gateway_url + .as_deref() + .and_then(|gateway_url| { + let uri: axum::http::Uri = gateway_url.parse().ok()?; + let scheme = uri.scheme_str()?; + let authority = uri.authority()?.as_str(); + + let mut query = String::new(); + if let Some(existing) = uri.query() { + if !existing.is_empty() { + query.push_str(existing); + query.push('&'); + } + } + query.push_str("jwt="); + query.push_str(&token); + + let path_and_query = format!("{root_path}?{query}"); + let uri = axum::http::Uri::builder() + .scheme(scheme) + .authority(authority) + .path_and_query(path_and_query) + .build() + .ok()?; + Some(uri.to_string()) + }) + .or_else(|| Some(format!("{root_path}?jwt={token}"))); + Json(CreateTokenResponse { token, jti: meta.jti, exp: meta.exp, url_template }).into_response() +} + +/// Build the auth router with all authentication endpoints. +pub fn auth_router() -> axum::Router> { + use axum::routing::{delete, get, post}; + + #[cfg_attr(not(feature = "moq"), allow(unused_mut))] + let mut router = axum::Router::new() + .route( + "/login", + post(login_handler).layer(ConcurrencyLimitLayer::new(AUTH_MAX_CONCURRENCY)), + ) + .route("/logout", post(logout_handler)) + .route("/me", get(me_handler)) + .route( + "/tokens", + post(create_token_handler).layer(ConcurrencyLimitLayer::new(AUTH_MAX_CONCURRENCY)), + ) + .route("/tokens", get(list_tokens_handler)) + .route( + "/tokens/{jti}", + delete(revoke_token_handler).layer(ConcurrencyLimitLayer::new(AUTH_MAX_CONCURRENCY)), + ); + + #[cfg(feature = "moq")] + { + router = router.route( + "/moq-tokens", + post(create_moq_token_handler).layer(ConcurrencyLimitLayer::new(AUTH_MAX_CONCURRENCY)), + ); + } + + router.layer(DefaultBodyLimit::max(AUTH_MAX_BODY_BYTES)) +} diff --git a/apps/skit/src/auth/mod.rs b/apps/skit/src/auth/mod.rs new file mode 100644 index 00000000..fa599ad2 --- /dev/null +++ b/apps/skit/src/auth/mod.rs @@ -0,0 +1,704 @@ +// SPDX-FileCopyrightText: © 2025 StreamKit 
Contributors +// +// SPDX-License-Identifier: MPL-2.0 + +//! Built-in JWT authentication for StreamKit. +//! +//! This module provides: +//! - JWT-based authentication for HTTP API, WebSocket, and MoQ/WebTransport +//! - Pluggable storage backends (file-based by default) +//! - Token revocation support +//! - Cookie-based browser sessions +//! +//! # Token Types +//! +//! - **API tokens** (`aud: "skit-api"`): For HTTP API and WebSocket control plane +//! - **MoQ tokens** (`aud: "skit-moq"`): For MoQ/WebTransport connections +//! +//! # Security Model +//! +//! - All tokens require `jti` claim for revocation support +//! - "Tokens we mint" policy: Only accept tokens whose jti is in our metadata store +//! - Raw tokens are never stored; only SHA-256 hashes are persisted +//! - Key material is stored with 0600 permissions + +pub mod claims; +pub mod cookie; +pub mod extractor; +pub mod handlers; +pub mod stores; + +#[cfg(feature = "moq")] +pub mod moq; +#[cfg(feature = "moq")] +#[allow(unused_imports)] +pub use moq::{verify_moq_token, MoqAuthContext}; + +pub use claims::{ApiClaims, AUD_API}; +#[cfg(feature = "moq")] +pub use claims::{MoqClaims, AUD_MOQ}; +pub use cookie::{build_logout_cookie, build_session_cookie}; +pub use extractor::{validate_token, validate_token_from_headers, AuthContext}; +pub use handlers::auth_router; +pub use stores::{ + AuthStoreError, FileKeyProvider, FileRevocationStore, FileTokenMetadataStore, KeyProvider, + RevocationStore, SigningKeyMaterial, TokenMetadata, TokenMetadataStore, TokenType, +}; + +use crate::config::{AuthConfig, AuthMode}; +use jsonwebtoken::{ + decode, decode_header, encode, Algorithm, DecodingKey, EncodingKey, Header, Validation, +}; +use sha2::{Digest, Sha256}; +use std::path::PathBuf; +use std::sync::Arc; +use std::time::{SystemTime, SystemTimeError, UNIX_EPOCH}; +use tracing::{debug, info}; + +/// Errors that can occur during authentication. 
+#[derive(Debug, thiserror::Error)] +#[allow(dead_code)] +pub enum AuthError { + #[error("Authentication is disabled")] + Disabled, + + #[error("Store error: {0}")] + Store(#[from] AuthStoreError), + + #[error("JWT error: {0}")] + Jwt(#[from] jsonwebtoken::errors::Error), + + #[error("Claims validation error: {0}")] + Claims(#[from] claims::ClaimsValidationError), + + #[error("Token not found in metadata store (not minted by this server)")] + UnknownToken, + + #[error("Token has been revoked")] + Revoked, + + #[error("Token expired")] + Expired, + + #[error("Invalid audience: expected {expected}, got {actual}")] + InvalidAudience { expected: String, actual: String }, + + #[error("TTL exceeds maximum allowed ({max} seconds)")] + TtlExceedsMax { max: u64 }, + + #[error("IO error: {0}")] + Io(#[from] std::io::Error), + + #[error("System time error: {0}")] + Time(#[from] SystemTimeError), + + #[error("Task join error: {0}")] + TaskJoin(#[from] tokio::task::JoinError), + + #[cfg(feature = "moq")] + #[error("MoQ auth error: {0}")] + Moq(String), +} + +/// Central authentication state for the server. +/// +/// This struct manages all auth-related state including: +/// - Whether auth is enabled +/// - Key storage and rotation +/// - Token revocation +/// - Token metadata ("tokens we mint") +pub struct AuthState { + enabled: bool, + config: AuthConfig, + key_provider: Option>, + revocation_store: Option>, + token_metadata_store: Option>, +} + +impl AuthState { + /// Create a disabled AuthState (sync, for use during initialization). + /// + /// This is useful when auth is disabled or when you need to create + /// the state synchronously before fully initializing. + pub fn disabled() -> Self { + Self { + enabled: false, + config: AuthConfig::default(), + key_provider: None, + revocation_store: None, + token_metadata_store: None, + } + } + + /// Create a new AuthState, initializing stores if auth is enabled. 
+ /// + /// # Errors + /// + /// Returns errors for store initialization failures, I/O errors, or + /// bootstrap token creation failures. + pub async fn new(config: &AuthConfig, enabled: bool) -> Result { + if !enabled { + info!("Authentication disabled"); + return Ok(Self { + enabled: false, + config: config.clone(), + key_provider: None, + revocation_store: None, + token_metadata_store: None, + }); + } + + let state_dir = PathBuf::from(&config.state_dir); + info!(state_dir = %state_dir.display(), "Initializing authentication"); + + // Initialize stores + let key_provider = Arc::new(FileKeyProvider::load_or_init(&state_dir).await?); + let revocation_store = Arc::new(FileRevocationStore::new(&state_dir).await?); + let token_metadata_store = Arc::new(FileTokenMetadataStore::new(&state_dir).await?); + + // Check if we need to create bootstrap admin token + let tokens = token_metadata_store.list().await?; + if tokens.is_empty() { + info!("No tokens found, creating bootstrap admin token"); + let state = Self { + enabled: true, + config: config.clone(), + key_provider: Some(key_provider.clone()), + revocation_store: Some(revocation_store.clone()), + token_metadata_store: Some(token_metadata_store.clone()), + }; + + let (token, _meta) = state + .mint_api_token( + "admin", + Some("Bootstrap admin token"), + config.api_max_ttl_secs, + "bootstrap", + ) + .await?; + + // Write bootstrap token to file + let token_path = state_dir.join("admin.token"); + FileKeyProvider::write_secure(&token_path, &token).await?; + + info!(path = %token_path.display(), "Bootstrap admin token written"); + + return Ok(state); + } + + Ok(Self { + enabled: true, + config: config.clone(), + key_provider: Some(key_provider), + revocation_store: Some(revocation_store), + token_metadata_store: Some(token_metadata_store), + }) + } + + /// Check if authentication is enabled. + pub const fn is_enabled(&self) -> bool { + self.enabled + } + + /// Get the revocation store (for checking revocation status). 
+ pub fn revocation_store(&self) -> Option<&Arc> { + self.revocation_store.as_ref() + } + + /// Check if a token is revoked by its token hash. + /// + /// Returns false if auth is disabled or if the revocation store is not available. + #[allow(dead_code)] + pub fn is_revoked(&self, token_hash: &str) -> bool { + self.revocation_store.as_ref().is_some_and(|store| store.is_revoked(token_hash)) + } + + /// Get the token metadata store. + pub fn token_metadata_store(&self) -> Option<&Arc> { + self.token_metadata_store.as_ref() + } + + /// Get the key provider. + #[allow(dead_code)] + pub fn key_provider(&self) -> Option<&Arc> { + self.key_provider.as_ref() + } + + /// Validate an API token and return its claims. + /// + /// This performs: + /// 1. JWT signature verification + /// 2. Expiration check + /// 3. Audience validation + /// 4. Claims structure validation + /// + /// Note: Revocation and "tokens we mint" checks should be done separately + /// by the caller for flexibility. + /// + /// # Errors + /// + /// Returns errors for invalid tokens, expired tokens, signature verification + /// failures, or disabled auth. + pub fn validate_api_token(&self, token: &str) -> Result { + let key_provider = self.key_provider.as_ref().ok_or(AuthError::Disabled)?; + + // Set up validation + let mut validation = Validation::new(Algorithm::EdDSA); + validation.set_audience(&[AUD_API]); + validation.set_required_spec_claims(&["exp", "aud", "jti"]); + + // Prefer selecting the verification key by `kid` (header), but fall back to trying all + // known keys if `kid` is missing (best-effort compatibility). 
+ let header = decode_header(token)?; + let mut candidates: Vec = Vec::new(); + + if let Some(kid) = header.kid { + candidates.push(kid); + } else { + candidates.extend(key_provider.valid_kids()); + } + + let mut last_error = None; + + for kid in candidates { + let Some(key_material) = key_provider.verification_key(&kid) else { + continue; + }; + + let decoding_key = DecodingKey::from_ed_der(&key_material.public_key); + match decode::(token, &decoding_key, &validation) { + Ok(token_data) => { + let claims = token_data.claims; + claims.validate()?; + debug!(jti = %claims.jti, role = %claims.role, kid = %kid, "API token validated"); + return Ok(claims); + }, + Err(e) => { + last_error = Some(e); + }, + } + } + + Err(last_error.map_or_else( + || AuthError::Jwt(jsonwebtoken::errors::ErrorKind::InvalidSignature.into()), + AuthError::Jwt, + )) + } + + /// Validate a MoQ token and return its claims. + /// + /// # Errors + /// + /// Returns errors for invalid tokens, expired tokens, signature verification + /// failures, or disabled auth. 
+ #[cfg(feature = "moq")] + pub fn validate_moq_token(&self, token: &str) -> Result { + let key_provider = self.key_provider.as_ref().ok_or(AuthError::Disabled)?; + + let mut validation = Validation::new(Algorithm::EdDSA); + validation.set_audience(&[AUD_MOQ]); + validation.set_required_spec_claims(&["exp", "aud", "jti"]); + + let header = decode_header(token)?; + let mut candidates: Vec = Vec::new(); + + if let Some(kid) = header.kid { + candidates.push(kid); + } else { + candidates.extend(key_provider.valid_kids()); + } + + let mut last_error = None; + + for kid in candidates { + let Some(key_material) = key_provider.verification_key(&kid) else { + continue; + }; + + let decoding_key = DecodingKey::from_ed_der(&key_material.public_key); + match decode::(token, &decoding_key, &validation) { + Ok(token_data) => { + let claims = token_data.claims; + claims.validate()?; + debug!(jti = %claims.jti, root = %claims.root, kid = %kid, "MoQ token validated"); + return Ok(claims); + }, + Err(e) => { + last_error = Some(e); + }, + } + } + + Err(last_error.map_or_else( + || AuthError::Jwt(jsonwebtoken::errors::ErrorKind::InvalidSignature.into()), + AuthError::Jwt, + )) + } + + /// Mint a new API token. + /// + /// Returns the raw token string and its metadata. + /// + /// # Errors + /// + /// Returns errors if auth is disabled, TTL exceeds max, or token storage fails. 
+ pub async fn mint_api_token( + &self, + role: &str, + label: Option<&str>, + ttl_secs: u64, + created_by: &str, + ) -> Result<(String, TokenMetadata), AuthError> { + let key_provider = self.key_provider.as_ref().ok_or(AuthError::Disabled)?; + let metadata_store = self.token_metadata_store.as_ref().ok_or(AuthError::Disabled)?; + + // Validate TTL + if ttl_secs > self.config.api_max_ttl_secs { + return Err(AuthError::TtlExceedsMax { max: self.config.api_max_ttl_secs }); + } + + let now = now_secs()?; + + let jti = uuid::Uuid::new_v4().to_string(); + let exp = now + ttl_secs; + + let claims = ApiClaims { + aud: AUD_API.to_string(), + sub: format!("token:{jti}"), + role: role.to_string(), + iat: now, + exp, + jti: jti.clone(), + }; + + // Sign the token (key access uses std::sync locks, so keep it off core async tasks) + let key_provider_clone = key_provider.clone(); + let key_material = + tokio::task::spawn_blocking(move || key_provider_clone.active_key()).await?; + let mut header = Header::new(Algorithm::EdDSA); + header.kid = Some(key_material.kid.clone()); + + let encoding_key = EncodingKey::from_ed_der(&key_material.pkcs8); + let token = encode(&header, &claims, &encoding_key)?; + + // Store metadata (hash the token, never store raw) + let token_hash = hash_token(&token); + let meta = TokenMetadata { + jti: jti.clone(), + token_hash, + token_type: TokenType::Api, + role: Some(role.to_string()), + label: label.map(String::from), + created_at: now, + exp, + revoked: false, + created_by: created_by.to_string(), + }; + + metadata_store.store(meta.clone()).await?; + + info!(jti = %jti, role = %role, ttl_secs, "Minted API token"); + + Ok((token, meta)) + } + + /// Mint a new MoQ token. + /// + /// # Errors + /// + /// Returns errors if auth is disabled, TTL exceeds max, or token storage fails. 
+ #[cfg(feature = "moq")] + pub async fn mint_moq_token( + &self, + root: &str, + subscribe: Vec, + publish: Vec, + label: Option<&str>, + ttl_secs: u64, + created_by: &str, + ) -> Result<(String, TokenMetadata), AuthError> { + let key_provider = self.key_provider.as_ref().ok_or(AuthError::Disabled)?; + let metadata_store = self.token_metadata_store.as_ref().ok_or(AuthError::Disabled)?; + + // Validate TTL + if ttl_secs > self.config.moq_max_ttl_secs { + return Err(AuthError::TtlExceedsMax { max: self.config.moq_max_ttl_secs }); + } + + let now = now_secs()?; + + let jti = uuid::Uuid::new_v4().to_string(); + let exp = now + ttl_secs; + + let claims = MoqClaims { + aud: AUD_MOQ.to_string(), + root: root.to_string(), + subscribe, + publish, + iat: now, + exp, + jti: jti.clone(), + }; + + // Sign the token (key access uses std::sync locks, so keep it off core async tasks) + let key_provider_clone = key_provider.clone(); + let key_material = + tokio::task::spawn_blocking(move || key_provider_clone.active_key()).await?; + let mut header = Header::new(Algorithm::EdDSA); + header.kid = Some(key_material.kid.clone()); + + let encoding_key = EncodingKey::from_ed_der(&key_material.pkcs8); + let token = encode(&header, &claims, &encoding_key)?; + + // Store metadata + let token_hash = hash_token(&token); + let meta = TokenMetadata { + jti: jti.clone(), + token_hash, + token_type: TokenType::Moq, + role: None, + label: label.map(String::from), + created_at: now, + exp, + revoked: false, + created_by: created_by.to_string(), + }; + + metadata_store.store(meta.clone()).await?; + + info!(jti = %jti, root = %root, ttl_secs, "Minted MoQ token"); + + Ok((token, meta)) + } + + /// Revoke a token by its jti. + /// + /// # Errors + /// + /// Returns errors if auth is disabled, token not found, or store operation fails. 
+ pub async fn revoke_token(&self, jti: &str) -> Result<(), AuthError> { + let revocation_store = self.revocation_store.as_ref().ok_or(AuthError::Disabled)?; + let metadata_store = self.token_metadata_store.as_ref().ok_or(AuthError::Disabled)?; + + // Get the token metadata to find expiration + let meta = metadata_store.get(jti).await?; + let meta = meta.ok_or(AuthError::UnknownToken)?; + let token_hash = meta.token_hash; + let exp = meta.exp; + + // Add to revocation store + revocation_store.revoke(&token_hash, exp).await?; + + // Mark as revoked in metadata + metadata_store.mark_revoked(jti).await?; + + info!(jti = %jti, "Token revoked"); + + Ok(()) + } + + /// Rotate the signing key. + /// + /// # Errors + /// + /// Returns errors if auth is disabled or key provider rotation fails. + pub async fn rotate_key(&self) -> Result { + let key_provider = self.key_provider.as_ref().ok_or(AuthError::Disabled)?; + let new_key = key_provider.rotate().await?; + info!(kid = %new_key.kid, "Signing key rotated"); + Ok(new_key) + } + + /// Check if auth should be enabled based on config and bind address. + #[allow(dead_code)] + pub const fn should_enable(config: &AuthConfig, bind_addr: &std::net::SocketAddr) -> bool { + match config.mode { + AuthMode::Auto => !bind_addr.ip().is_loopback(), + AuthMode::Enabled => true, + AuthMode::Disabled => false, + } + } +} + +/// Compute SHA-256 hash of a token (hex-encoded). +pub fn hash_token(token: &str) -> String { + let mut hasher = Sha256::new(); + hasher.update(token.as_bytes()); + hex::encode(hasher.finalize()) +} + +/// Get current Unix timestamp in seconds. +/// +/// # Errors +/// +/// Returns an error if the system clock is before Unix epoch. 
+pub fn now_secs() -> Result { + Ok(SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs()) +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + use tempfile::TempDir; + + async fn create_test_auth_state() -> (AuthState, TempDir) { + let temp_dir = TempDir::new().unwrap(); + let config = AuthConfig { + mode: AuthMode::Enabled, + state_dir: temp_dir.path().to_string_lossy().to_string(), + cookie_name: "test_session".to_string(), + api_default_ttl_secs: 3600, + api_max_ttl_secs: 86400, + moq_default_ttl_secs: 3600, + moq_max_ttl_secs: 86400, + }; + + let state = AuthState::new(&config, true).await.unwrap(); + (state, temp_dir) + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_auth_state_disabled() { + let temp_dir = TempDir::new().unwrap(); + let config = AuthConfig { + mode: AuthMode::Disabled, + state_dir: temp_dir.path().to_string_lossy().to_string(), + ..Default::default() + }; + + let state = AuthState::new(&config, false).await.unwrap(); + assert!(!state.is_enabled()); + assert!(state.key_provider().is_none()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_mint_and_validate_api_token() { + let (state, _temp_dir) = create_test_auth_state().await; + let state = Arc::new(state); + + // Mint a token + let (token, meta) = + state.mint_api_token("admin", Some("Test token"), 3600, "test").await.unwrap(); + + assert!(!token.is_empty()); + assert_eq!(meta.role, Some("admin".to_string())); + assert!(!meta.revoked); + + // Validate the token (uses blocking_read internally) + let state_clone = state.clone(); + let token_clone = token.clone(); + let claims = + tokio::task::spawn_blocking(move || state_clone.validate_api_token(&token_clone)) + .await + .unwrap() + .unwrap(); + assert_eq!(claims.jti, meta.jti); + assert_eq!(claims.role, "admin"); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_token_revocation() { + let (state, _temp_dir) = create_test_auth_state().await; + let state = 
Arc::new(state); + + // Mint and validate + let (token, meta) = state.mint_api_token("user", None, 3600, "test").await.unwrap(); + + let state_clone = state.clone(); + let token_clone = token.clone(); + let claims = + tokio::task::spawn_blocking(move || state_clone.validate_api_token(&token_clone)) + .await + .unwrap() + .unwrap(); + assert_eq!(claims.jti, meta.jti); + + let revocation_store = state.revocation_store().unwrap().clone(); + let hash_clone = hash_token(&token); + let is_revoked = + tokio::task::spawn_blocking(move || revocation_store.is_revoked(&hash_clone)) + .await + .unwrap(); + assert!(!is_revoked); + + // Revoke + state.revoke_token(&meta.jti).await.unwrap(); + + let revocation_store = state.revocation_store().unwrap().clone(); + let hash_clone = hash_token(&token); + let is_revoked = + tokio::task::spawn_blocking(move || revocation_store.is_revoked(&hash_clone)) + .await + .unwrap(); + assert!(is_revoked); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_ttl_max_enforcement() { + let (state, _temp_dir) = create_test_auth_state().await; + + // Try to mint with TTL exceeding max + let result = state.mint_api_token("admin", None, 1_000_000, "test").await; + + assert!(matches!(result, Err(AuthError::TtlExceedsMax { .. 
}))); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_bootstrap_token_created() { + let temp_dir = TempDir::new().unwrap(); + let config = AuthConfig { + mode: AuthMode::Enabled, + state_dir: temp_dir.path().to_string_lossy().to_string(), + api_max_ttl_secs: 86400, + ..Default::default() + }; + + // First initialization should create bootstrap token + let _state = AuthState::new(&config, true).await.unwrap(); + + // Check bootstrap token file exists + let token_path = temp_dir.path().join("admin.token"); + assert!(token_path.exists()); + + let token = tokio::fs::read_to_string(&token_path).await.unwrap(); + assert!(!token.is_empty()); + } + + #[test] + fn test_hash_token() { + let hash1 = hash_token("test-token-1"); + let hash2 = hash_token("test-token-1"); + let hash3 = hash_token("test-token-2"); + + // Same input = same hash + assert_eq!(hash1, hash2); + // Different input = different hash + assert_ne!(hash1, hash3); + // Hash is hex-encoded SHA-256 (64 chars) + assert_eq!(hash1.len(), 64); + } + + #[test] + fn test_should_enable() { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + let loopback = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4545); + let any = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 4545); + + // Auto mode + let auto_config = AuthConfig { mode: AuthMode::Auto, ..Default::default() }; + assert!(!AuthState::should_enable(&auto_config, &loopback)); + assert!(AuthState::should_enable(&auto_config, &any)); + + // Enabled mode + let enabled_config = AuthConfig { mode: AuthMode::Enabled, ..Default::default() }; + assert!(AuthState::should_enable(&enabled_config, &loopback)); + assert!(AuthState::should_enable(&enabled_config, &any)); + + // Disabled mode + let disabled_config = AuthConfig { mode: AuthMode::Disabled, ..Default::default() }; + assert!(!AuthState::should_enable(&disabled_config, &loopback)); + assert!(!AuthState::should_enable(&disabled_config, &any)); + } +} diff --git a/apps/skit/src/auth/moq.rs 
b/apps/skit/src/auth/moq.rs new file mode 100644 index 00000000..86a52bb4 --- /dev/null +++ b/apps/skit/src/auth/moq.rs @@ -0,0 +1,277 @@ +// SPDX-FileCopyrightText: © 2025 StreamKit Contributors +// +// SPDX-License-Identifier: MPL-2.0 + +//! MoQ authentication context and path verification. +//! +//! This module implements moq-relay compatible path reduction logic: +//! - The JWT `root` claim specifies the URL path prefix the token is valid for +//! - `subscribe`/`publish` are broadcast path prefixes (not URL paths) +//! - Permissions are reduced based on connection path depth +//! +//! # Path Matching +//! +//! Path matching uses segment-based comparison (like moq-relay), not string +//! `starts_with`. This prevents path confusion attacks: +//! - Token with `root: "/moq"` does NOT match `/moq2` (different segment) +//! - Token with `root: "/moq"` DOES match `/moq/session1` (prefix segments) + +use super::{AuthError, MoqClaims}; +use moq_lite::{Path, PathOwned}; +use streamkit_core::moq_gateway::MoqAuthChecker; + +/// Verified MoQ auth context after path reduction. +/// +/// This struct represents the reduced permissions for a specific connection, +/// after validating the JWT and reducing permissions based on the connection +/// path depth (similar to moq-relay's `AuthToken`). 
+#[derive(Debug, Clone)] +pub struct MoqAuthContext { + /// The actual connection path (after root validation) + #[allow(dead_code)] + pub root: PathOwned, + /// Reduced subscribe permissions (broadcast paths relative to connection) + pub subscribe: Vec, + /// Reduced publish permissions (broadcast paths relative to connection) + pub publish: Vec, +} + +impl MoqAuthContext { + fn check_permission(broadcast: &str, allowed: &[PathOwned]) -> bool { + if allowed.is_empty() { + return false; + } + + let broadcast_path = Path::new(broadcast); + + // Check if any allowed path is a prefix of (or matches) the broadcast + allowed.iter().any(|allowed_path| { + if allowed_path.is_empty() { + // [""] = root allowed = any broadcast + true + } else { + // Segment-based prefix check: allowed_path must be a prefix of broadcast_path + // This means broadcast_path should be able to strip allowed_path as prefix + broadcast_path.strip_prefix(allowed_path).is_some() + } + }) + } +} + +/// Implement the core trait for permission checking. +/// This allows nodes to check permissions without knowing the full MoqAuthContext type. +impl MoqAuthChecker for MoqAuthContext { + fn can_subscribe(&self, broadcast: &str) -> bool { + Self::check_permission(broadcast, &self.subscribe) + } + + fn can_publish(&self, broadcast: &str) -> bool { + Self::check_permission(broadcast, &self.publish) + } +} + +/// Verify MoQ JWT and reduce permissions based on connection path. +/// +/// This function implements moq-relay style path reduction: +/// 1. Verify the connection URL path starts with the token's `root` (segment-based) +/// 2. Compute the suffix (connection path minus root) +/// 3. 
Reduce subscribe/publish permissions based on suffix depth +/// +/// # Arguments +/// * `claims` - The validated MoQ JWT claims +/// * `connection_path` - The URL path from the WebTransport connection (e.g., "/moq/session1") +/// +/// # Returns +/// * `Ok(MoqAuthContext)` - Reduced permissions for this connection +/// * `Err(AuthError)` - If root doesn't match connection path +/// +/// # Errors +/// +/// Returns `AuthError::Moq` if the connection path doesn't match the token's root. +/// +/// # Example +/// ```ignore +/// // Token claims: +/// // root: "/moq" +/// // subscribe: ["session1/output", ""] +/// // publish: ["session1/input"] +/// +/// // Connection to "/moq/session1": +/// // suffix = "session1" +/// // subscribe reduced to: ["output", ""] (empty string = allow all) +/// // publish reduced to: ["input"] +/// ``` +pub fn verify_moq_token( + claims: &MoqClaims, + connection_path: &str, +) -> Result { + // Parse paths using moq_lite::Path for segment-based matching + let root = Path::new(&claims.root); + let url_path = Path::new(connection_path); + + // URL path must start with root (segment-based, not string starts_with) + let suffix = url_path.strip_prefix(root).ok_or_else(|| { + AuthError::Moq(format!( + "Connection path '{}' does not match token root '{}'", + connection_path, claims.root + )) + })?; + + // Reduce subscribe permissions based on connection depth + let subscribe = claims.subscribe.iter().filter_map(|p| reduce_permission(p, &suffix)).collect(); + + // Reduce publish permissions the same way + let publish = claims.publish.iter().filter_map(|p| reduce_permission(p, &suffix)).collect(); + + Ok(MoqAuthContext { root: url_path.to_owned(), subscribe, publish }) +} + +/// Reduce a permission path based on connection suffix. +/// +/// If the permission is empty (root = allow all), it stays allowed. +/// Otherwise, strip the suffix from the permission and return the remainder. 
+fn reduce_permission(permission: &str, suffix: &Path) -> Option { + let p = Path::new(permission); + + if p.is_empty() { + // [""] = root allowed, stays allowed at any depth + Some(p.to_owned()) + } else if suffix.is_empty() { + // No suffix = keep permission as-is + Some(p.to_owned()) + } else { + // Only keep if suffix is a prefix of the permission path + p.strip_prefix(suffix).map(|reduced| reduced.to_owned()) + } +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + + fn make_claims(root: &str, subscribe: Vec<&str>, publish: Vec<&str>) -> MoqClaims { + MoqClaims { + aud: "skit-moq".to_string(), + root: root.to_string(), + subscribe: subscribe.into_iter().map(String::from).collect(), + publish: publish.into_iter().map(String::from).collect(), + iat: 0, + exp: u64::MAX, + jti: "test".to_string(), + } + } + + #[test] + fn test_exact_root_match() { + let claims = make_claims("/moq", vec![""], vec![""]); + let ctx = verify_moq_token(&claims, "/moq").unwrap(); + + assert!(ctx.can_subscribe("anything")); + assert!(ctx.can_publish("anything")); + } + + #[test] + fn test_root_prefix_match() { + let claims = make_claims("/moq", vec!["session1/output"], vec!["session1/input"]); + let ctx = verify_moq_token(&claims, "/moq/session1").unwrap(); + + // Permissions reduced by stripping "session1" prefix + assert!(ctx.can_subscribe("output")); + assert!(ctx.can_publish("input")); + assert!(!ctx.can_subscribe("other")); + assert!(!ctx.can_publish("other")); + } + + #[test] + fn test_root_mismatch_segment() { + // Token root "/moq" should NOT match "/moq2" (different segment) + let claims = make_claims("/moq", vec![""], vec![""]); + let result = verify_moq_token(&claims, "/moq2"); + assert!(result.is_err()); + } + + #[test] + fn test_root_mismatch_completely_different() { + let claims = make_claims("/moq/session1", vec![""], vec![""]); + let result = verify_moq_token(&claims, "/other/path"); + assert!(result.is_err()); + } + + #[test] + fn 
test_empty_permissions_deny_all() { + let claims = make_claims("/moq", vec![], vec![]); + let ctx = verify_moq_token(&claims, "/moq").unwrap(); + + assert!(!ctx.can_subscribe("anything")); + assert!(!ctx.can_publish("anything")); + } + + #[test] + fn test_subscribe_only() { + let claims = make_claims("/moq", vec![""], vec![]); + let ctx = verify_moq_token(&claims, "/moq").unwrap(); + + assert!(ctx.can_subscribe("anything")); + assert!(!ctx.can_publish("anything")); + } + + #[test] + fn test_publish_only() { + let claims = make_claims("/moq", vec![], vec![""]); + let ctx = verify_moq_token(&claims, "/moq").unwrap(); + + assert!(!ctx.can_subscribe("anything")); + assert!(ctx.can_publish("anything")); + } + + #[test] + fn test_deep_path_reduction() { + // Token allows publishing to "a/b/c/input" under root "/moq" + let claims = make_claims("/moq", vec![], vec!["a/b/c/input"]); + + // Connect to "/moq/a/b" + let ctx = verify_moq_token(&claims, "/moq/a/b").unwrap(); + + // Permission should be reduced to "c/input" + assert!(ctx.can_publish("c/input")); + assert!(ctx.can_publish("c/input/more")); // prefix match + assert!(!ctx.can_publish("input")); // doesn't match reduced permission + } + + #[test] + fn test_multiple_permissions() { + let claims = make_claims("/moq", vec!["output1", "output2"], vec!["input"]); + let ctx = verify_moq_token(&claims, "/moq").unwrap(); + + assert!(ctx.can_subscribe("output1")); + assert!(ctx.can_subscribe("output2")); + assert!(!ctx.can_subscribe("output3")); + assert!(ctx.can_publish("input")); + } + + #[test] + fn test_broadcast_prefix_matching() { + let claims = make_claims("/moq", vec!["audio"], vec![]); + let ctx = verify_moq_token(&claims, "/moq").unwrap(); + + // "audio" prefix allows "audio", "audio/left", "audio/right", etc. 
+ assert!(ctx.can_subscribe("audio")); + assert!(ctx.can_subscribe("audio/left")); + assert!(ctx.can_subscribe("audio/stereo/left")); + assert!(!ctx.can_subscribe("video")); + // Note: "audiovisual" should NOT match because it's not a segment prefix + // moq_lite uses segment-based matching, not string prefix + assert!(!ctx.can_subscribe("audiovisual")); + } + + #[test] + fn test_moq_auth_context_debug() { + let claims = make_claims("/moq", vec![""], vec![""]); + let ctx = verify_moq_token(&claims, "/moq").unwrap(); + + // Just verify Debug is implemented + let debug_str = format!("{ctx:?}"); + assert!(debug_str.contains("MoqAuthContext")); + } +} diff --git a/apps/skit/src/auth/stores/file.rs b/apps/skit/src/auth/stores/file.rs new file mode 100644 index 00000000..25f4299a --- /dev/null +++ b/apps/skit/src/auth/stores/file.rs @@ -0,0 +1,691 @@ +// SPDX-FileCopyrightText: © 2025 StreamKit Contributors +// +// SPDX-License-Identifier: MPL-2.0 + +//! File-based implementations of auth stores. + +use super::{ + AuthStoreError, Jwk, Jwks, KeyProvider, RevocationStore, SigningKeyMaterial, TokenMetadata, + TokenMetadataStore, VerificationKeyMaterial, +}; +use async_trait::async_trait; +use base64::Engine; +use serde::{Deserialize, Serialize}; +use std::collections::{HashMap, HashSet}; +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use std::sync::RwLock; +use std::time::{SystemTime, UNIX_EPOCH}; +use tracing::{debug, info, warn}; + +const PRIVATE_JWK_FILENAME: &str = "auth.jwk"; +const PUBLIC_JWKS_FILENAME: &str = "jwks.json"; + +/// Private JWK persisted to disk (contains the Ed25519 seed in `d`). 
+#[derive(Clone, Serialize, Deserialize)] +struct PrivateJwk { + kty: String, + crv: String, + #[serde(rename = "use")] + public_key_use: String, + alg: String, + kid: String, + x: String, + d: String, +} + +impl PrivateJwk { + fn validate(&self) -> Result<(), AuthStoreError> { + if self.kty != "OKP" { + return Err(AuthStoreError::InvalidKey(format!("Unsupported kty: {}", self.kty))); + } + if self.crv != "Ed25519" { + return Err(AuthStoreError::InvalidKey(format!("Unsupported crv: {}", self.crv))); + } + if self.alg != "EdDSA" { + return Err(AuthStoreError::InvalidKey(format!("Unsupported alg: {}", self.alg))); + } + if self.public_key_use != "sig" { + return Err(AuthStoreError::InvalidKey(format!( + "Unsupported JWK use: {}", + self.public_key_use + ))); + } + if self.kid.trim().is_empty() { + return Err(AuthStoreError::InvalidKey("Missing kid".to_string())); + } + if self.x.trim().is_empty() || self.d.trim().is_empty() { + return Err(AuthStoreError::InvalidKey("Missing key material (x/d)".to_string())); + } + Ok(()) + } + + fn to_public_jwk(&self) -> Jwk { + Jwk { + kty: self.kty.clone(), + crv: self.crv.clone(), + public_key_use: self.public_key_use.clone(), + alg: self.alg.clone(), + kid: self.kid.clone(), + x: self.x.clone(), + } + } +} + +fn lock_read(lock: &RwLock) -> std::sync::RwLockReadGuard<'_, T> { + lock.read().unwrap_or_else(std::sync::PoisonError::into_inner) +} + +fn lock_write(lock: &RwLock) -> std::sync::RwLockWriteGuard<'_, T> { + lock.write().unwrap_or_else(std::sync::PoisonError::into_inner) +} + +/// File-based key provider with rotation support. +/// +/// Stores private signing key in `auth.jwk` (0600) and public verification keys in `jwks.json`. +pub struct FileKeyProvider { + state_dir: PathBuf, + active: RwLock, + /// kid -> raw Ed25519 public key bytes (32 bytes) + public_keys: RwLock>>, + jwks: RwLock, +} + +impl FileKeyProvider { + /// Load existing keys or initialize with a new key. 
+ /// + /// # Errors + /// + /// Returns errors for I/O failures, invalid permissions, JSON parsing errors, or invalid key material. + /// + /// Random number generator failure is reported as an `InvalidKey` error rather than a panic. + #[allow(clippy::expect_used)] + pub async fn load_or_init(state_dir: &Path) -> Result { + // Ensure state directory exists + tokio::fs::create_dir_all(state_dir).await?; + + let private_path = state_dir.join(PRIVATE_JWK_FILENAME); + let jwks_path = state_dir.join(PUBLIC_JWKS_FILENAME); + + let (private_jwk, active_signing_key, public_key_bytes) = if private_path.exists() { + Self::verify_permissions(&private_path)?; + let content = tokio::fs::read_to_string(&private_path).await?; + let private: PrivateJwk = serde_json::from_str(&content)?; + private.validate()?; + + let seed = base64url_decode(&private.d)?; + let public_from_file = base64url_decode(&private.x)?; + + if seed.len() != 32 { + return Err(AuthStoreError::InvalidKey( + "Ed25519 seed must be 32 bytes".to_string(), + )); + } + if public_from_file.len() != 32 { + return Err(AuthStoreError::InvalidKey( + "Ed25519 public key must be 32 bytes".to_string(), + )); + } + + let (derived_public, pkcs8) = derive_keypair(&seed)?; + if derived_public != public_from_file { + return Err(AuthStoreError::InvalidKey( + "Public key in JWK does not match derived key".to_string(), + )); + } + + let kid = private.kid.clone(); + ( + private, + SigningKeyMaterial { kid, pkcs8: Arc::from(pkcs8.into_boxed_slice()) }, + Arc::from(public_from_file.into_boxed_slice()), + ) + } else { + let (private, signing_key, public_key) = generate_new_private_key()?; + Self::write_secure(&private_path, &serde_json::to_string_pretty(&private)?).await?; + info!(path = %private_path.display(), "Created new Ed25519 signing key"); + (private, signing_key, public_key) + }; + + let mut jwks = if jwks_path.exists() { + let content = tokio::fs::read_to_string(&jwks_path).await?; + serde_json::from_str::(&content)?
+ } else { + Jwks { keys: vec![] } + }; + + // Ensure active key is present in JWKS. + if !jwks.keys.iter().any(|k| k.kid == private_jwk.kid) { + jwks.keys.push(private_jwk.to_public_jwk()); + Self::write_secure(&jwks_path, &serde_json::to_string_pretty(&jwks)?).await?; + } + + // Build public key map (kid -> raw bytes) + let mut public_keys: HashMap> = HashMap::new(); + for jwk in &jwks.keys { + let bytes = base64url_decode(&jwk.x)?; + if bytes.len() != 32 { + return Err(AuthStoreError::InvalidKey(format!( + "Invalid public key length for kid {}", + jwk.kid + ))); + } + public_keys.insert(jwk.kid.clone(), Arc::from(bytes.into_boxed_slice())); + } + + // Ensure the active public key matches the private key. + if let Some(existing) = public_keys.get(&private_jwk.kid) { + if existing.as_ref() != public_key_bytes.as_ref() { + return Err(AuthStoreError::InvalidKey( + "JWKS entry for active kid does not match private key".to_string(), + )); + } + } else { + public_keys.insert(private_jwk.kid.clone(), public_key_bytes); + } + + debug!(active_kid = %active_signing_key.kid, num_keys = jwks.keys.len(), "Loaded JWKS"); + + Ok(Self { + state_dir: state_dir.to_path_buf(), + active: RwLock::new(active_signing_key), + public_keys: RwLock::new(public_keys), + jwks: RwLock::new(jwks), + }) + } + + /// Verify file has secure permissions (0600). + #[cfg(unix)] + fn verify_permissions(path: &Path) -> Result<(), AuthStoreError> { + use std::os::unix::fs::PermissionsExt; + + let metadata = std::fs::metadata(path)?; + let mode = metadata.permissions().mode() & 0o777; + + if mode != 0o600 { + return Err(AuthStoreError::InsecurePermissions { + path: path.display().to_string(), + actual: mode, + }); + } + Ok(()) + } + + #[cfg(not(unix))] + fn verify_permissions(_path: &Path) -> Result<(), AuthStoreError> { + // Non-Unix platforms: skip permission check + Ok(()) + } + + /// Write file with secure permissions (0600). 
+ pub(crate) async fn write_secure(path: &Path, content: &str) -> Result<(), AuthStoreError> { + use tokio::io::AsyncWriteExt; + + // Write to a unique temp file first to avoid partially-written files. + let temp_path = path.with_extension(format!("tmp-{}", uuid::Uuid::new_v4())); + + #[cfg(unix)] + { + let mut file = tokio::fs::OpenOptions::new() + .write(true) + .create_new(true) + .mode(0o600) + .open(&temp_path) + .await?; + + file.write_all(content.as_bytes()).await?; + file.flush().await?; + drop(file); + } + + #[cfg(not(unix))] + { + let mut file = + tokio::fs::OpenOptions::new().write(true).create_new(true).open(&temp_path).await?; + + file.write_all(content.as_bytes()).await?; + file.flush().await?; + drop(file); + } + + // Atomic rename (same directory). + tokio::fs::rename(&temp_path, path).await?; + Ok(()) + } +} + +#[async_trait] +#[allow(clippy::expect_used)] +impl KeyProvider for FileKeyProvider { + fn active_key(&self) -> SigningKeyMaterial { + lock_read(&self.active).clone() + } + + fn verification_key(&self, kid: &str) -> Option { + let decoded = lock_read(&self.public_keys); + decoded + .get(kid) + .map(|public_key| VerificationKeyMaterial { public_key: public_key.clone() }) + } + + fn valid_kids(&self) -> Vec { + lock_read(&self.public_keys).keys().cloned().collect() + } + + fn jwks(&self) -> Jwks { + lock_read(&self.jwks).clone() + } + + async fn rotate(&self) -> Result { + let private_path = self.state_dir.join(PRIVATE_JWK_FILENAME); + let jwks_path = self.state_dir.join(PUBLIC_JWKS_FILENAME); + + let (private_jwk, new_signing_key, public_key_bytes) = generate_new_private_key()?; + + let mut jwks = lock_read(&self.jwks).clone(); + if !jwks.keys.iter().any(|k| k.kid == private_jwk.kid) { + jwks.keys.push(private_jwk.to_public_jwk()); + } + + // Persist JWKS first so the new kid becomes verifiable before switching active key. 
+ Self::write_secure(&jwks_path, &serde_json::to_string_pretty(&jwks)?).await?; + Self::write_secure(&private_path, &serde_json::to_string_pretty(&private_jwk)?).await?; + + { + let mut active = lock_write(&self.active); + *active = new_signing_key.clone(); + } + { + let mut public_keys = lock_write(&self.public_keys); + public_keys.insert(private_jwk.kid.clone(), public_key_bytes); + } + { + let mut jwks_lock = lock_write(&self.jwks); + *jwks_lock = jwks.clone(); + } + + info!(kid = %new_signing_key.kid, total_keys = jwks.keys.len(), "Rotated signing key"); + + Ok(new_signing_key) + } +} + +fn base64url_encode(bytes: &[u8]) -> String { + base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(bytes) +} + +fn base64url_decode(encoded: &str) -> Result, AuthStoreError> { + Ok(base64::engine::general_purpose::URL_SAFE_NO_PAD.decode(encoded)?) +} + +fn derive_keypair(seed: &[u8]) -> Result<(Vec, Vec), AuthStoreError> { + use aws_lc_rs::signature::{Ed25519KeyPair, KeyPair}; + + let key_pair = Ed25519KeyPair::from_seed_unchecked(seed) + .map_err(|e| AuthStoreError::InvalidKey(format!("Invalid Ed25519 seed: {e}")))?; + + let public_key = key_pair.public_key().as_ref().to_vec(); + let pkcs8 = key_pair + .to_pkcs8() + .map_err(|e| AuthStoreError::InvalidKey(format!("Failed to encode PKCS#8: {e}")))? 
+ .as_ref() + .to_vec(); + + Ok((public_key, pkcs8)) +} + +fn generate_new_private_key() -> Result<(PrivateJwk, SigningKeyMaterial, Arc<[u8]>), AuthStoreError> +{ + let mut seed = [0u8; 32]; + getrandom::fill(&mut seed) + .map_err(|e| AuthStoreError::InvalidKey(format!("RNG failure: {e}")))?; + let kid = uuid::Uuid::new_v4().to_string(); + + let (public_key, pkcs8) = derive_keypair(&seed)?; + + let private = PrivateJwk { + kty: "OKP".to_string(), + crv: "Ed25519".to_string(), + public_key_use: "sig".to_string(), + alg: "EdDSA".to_string(), + kid: kid.clone(), + x: base64url_encode(&public_key), + d: base64url_encode(&seed), + }; + + let signing = SigningKeyMaterial { kid, pkcs8: Arc::from(pkcs8.into_boxed_slice()) }; + let public_key_arc: Arc<[u8]> = Arc::from(public_key.into_boxed_slice()); + + Ok((private, signing, public_key_arc)) +} + +/// File-based revocation store with in-memory lookup. +/// +/// Revocations are stored in `revoked.json` and loaded into memory at startup. +/// The `is_revoked` check is a fast HashSet lookup. +pub struct FileRevocationStore { + state_dir: PathBuf, + /// In-memory map for fast lookups (token_hash -> exp) + revoked: RwLock>, +} + +impl FileRevocationStore { + /// Create a new store and load existing revocations. + /// + /// # Errors + /// + /// Returns errors for I/O failures or JSON parsing errors. + pub async fn new(state_dir: &Path) -> Result { + tokio::fs::create_dir_all(state_dir).await?; + + let store = + Self { state_dir: state_dir.to_path_buf(), revoked: RwLock::new(HashMap::new()) }; + store.load().await?; + Ok(store) + } + + fn now_secs_lossy() -> u64 { + SystemTime::now().duration_since(UNIX_EPOCH).map(|d| d.as_secs()).unwrap_or_default() + } + + fn prune_expired_locked(map: &mut HashMap) { + let now = Self::now_secs_lossy(); + map.retain(|_, exp| *exp == 0 || *exp > now); + } + + /// Persist revocations atomically. 
+ async fn persist(&self) -> Result<(), AuthStoreError> { + let data = { + let revoked = lock_read(&self.revoked); + serde_json::to_string_pretty(&*revoked)? + }; + let path = self.state_dir.join("revoked.json"); + FileKeyProvider::write_secure(&path, &data).await?; + + Ok(()) + } +} + +#[derive(Deserialize)] +#[serde(untagged)] +enum RevokedOnDisk { + Map(HashMap), + Set(HashSet), +} + +#[async_trait] +impl RevocationStore for FileRevocationStore { + fn is_revoked(&self, token_hash: &str) -> bool { + // Sync in-memory check (fast path, no await) + lock_read(&self.revoked).contains_key(token_hash) + } + + async fn revoke(&self, token_hash: &str, exp: u64) -> Result<(), AuthStoreError> { + lock_write(&self.revoked).insert(token_hash.to_string(), exp); + Self::prune_expired_locked(&mut lock_write(&self.revoked)); + self.persist().await?; + debug!(token_hash = %token_hash, "Token revoked"); + Ok(()) + } + + async fn load(&self) -> Result<(), AuthStoreError> { + let path = self.state_dir.join("revoked.json"); + if path.exists() { + FileKeyProvider::verify_permissions(&path)?; + let data = tokio::fs::read_to_string(&path).await?; + let revoked: RevokedOnDisk = serde_json::from_str(&data)?; + let mut map = match revoked { + RevokedOnDisk::Map(map) => map, + RevokedOnDisk::Set(set) => set.into_iter().map(|h| (h, 0)).collect(), + }; + Self::prune_expired_locked(&mut map); + let count = map.len(); + *lock_write(&self.revoked) = map; + debug!(count, "Loaded revocations from disk"); + } + Ok(()) + } +} + +/// File-based token metadata store. +/// +/// Stores metadata in `tokens.json`. This is used to: +/// - List all minted tokens for admin UI +/// - Enforce "tokens we mint" policy +/// - Track revocation status +pub struct FileTokenMetadataStore { + state_dir: PathBuf, + /// In-memory cache of all tokens (jti -> metadata) + tokens: RwLock>, +} + +impl FileTokenMetadataStore { + /// Create a new store and load existing metadata. 
+ /// + /// # Errors + /// + /// Returns errors for I/O failures or JSON parsing errors. + pub async fn new(state_dir: &Path) -> Result { + tokio::fs::create_dir_all(state_dir).await?; + + let store = + Self { state_dir: state_dir.to_path_buf(), tokens: RwLock::new(HashMap::new()) }; + + // Load existing tokens + let path = state_dir.join("tokens.json"); + if path.exists() { + FileKeyProvider::verify_permissions(&path)?; + let data = tokio::fs::read_to_string(&path).await?; + let tokens: Vec = serde_json::from_str(&data)?; + let count = tokens.len(); + { + let mut map = lock_write(&store.tokens); + for token in tokens { + map.insert(token.jti.clone(), token); + } + } + debug!(count, "Loaded token metadata from disk"); + } + + Ok(store) + } + + /// Persist tokens atomically. + async fn persist(&self) -> Result<(), AuthStoreError> { + let list: Vec = lock_read(&self.tokens).values().cloned().collect(); + let data = serde_json::to_string_pretty(&list)?; + + let path = self.state_dir.join("tokens.json"); + FileKeyProvider::write_secure(&path, &data).await?; + + Ok(()) + } +} + +#[async_trait] +impl TokenMetadataStore for FileTokenMetadataStore { + async fn store(&self, meta: TokenMetadata) -> Result<(), AuthStoreError> { + let jti = meta.jti.clone(); + lock_write(&self.tokens).insert(jti.clone(), meta); + self.persist().await?; + debug!(jti = %jti, "Stored token metadata"); + Ok(()) + } + + async fn exists(&self, jti: &str) -> bool { + lock_read(&self.tokens).contains_key(jti) + } + + async fn list(&self) -> Result, AuthStoreError> { + Ok(lock_read(&self.tokens).values().cloned().collect()) + } + + async fn mark_revoked(&self, jti: &str) -> Result<(), AuthStoreError> { + { + let mut tokens = lock_write(&self.tokens); + if let Some(token) = tokens.get_mut(jti) { + token.revoked = true; + } else { + warn!(jti = %jti, "Attempted to mark non-existent token as revoked"); + } + } + self.persist().await?; + Ok(()) + } + + async fn get(&self, jti: &str) -> Result, 
AuthStoreError> { + Ok(lock_read(&self.tokens).get(jti).cloned()) + } +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + use tempfile::TempDir; + + #[tokio::test(flavor = "multi_thread")] + async fn test_key_provider_init_and_active() { + let temp_dir = TempDir::new().unwrap(); + let provider = Arc::new(FileKeyProvider::load_or_init(temp_dir.path()).await.unwrap()); + + let provider_clone = provider.clone(); + let key = tokio::task::spawn_blocking(move || provider_clone.active_key()).await.unwrap(); + assert!(!key.kid.is_empty()); + assert!(!key.pkcs8.is_empty()); + + // Active key must be present in JWKS and verifiable via its kid. + let jwks = tokio::task::spawn_blocking({ + let provider = provider.clone(); + move || provider.jwks() + }) + .await + .unwrap(); + assert!(jwks.keys.iter().any(|jwk| jwk.kid == key.kid)); + + let verification = tokio::task::spawn_blocking({ + let provider = provider.clone(); + let kid = key.kid.clone(); + move || provider.verification_key(&kid) + }) + .await + .unwrap() + .unwrap(); + assert_eq!(verification.public_key.len(), 32); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_key_provider_rotation() { + let temp_dir = TempDir::new().unwrap(); + let provider = Arc::new(FileKeyProvider::load_or_init(temp_dir.path()).await.unwrap()); + + let old_key = tokio::task::spawn_blocking({ + let provider = provider.clone(); + move || provider.active_key() + }) + .await + .unwrap(); + let old_kid = old_key.kid.clone(); + + let old_public = tokio::task::spawn_blocking({ + let provider = provider.clone(); + let kid = old_kid.clone(); + move || provider.verification_key(&kid).unwrap().public_key + }) + .await + .unwrap(); + + let new_key = provider.rotate().await.unwrap(); + assert_ne!(old_kid, new_key.kid); + + // Old key should still be available for verification + let verification_key = tokio::task::spawn_blocking({ + let provider = provider.clone(); + let kid = old_kid.clone(); + move || 
provider.verification_key(&kid) + }) + .await + .unwrap(); + assert!(verification_key.is_some()); + assert_eq!(verification_key.unwrap().public_key.as_ref(), old_public.as_ref()); + + // New key should be active + let active = tokio::task::spawn_blocking({ + let provider = provider.clone(); + move || provider.active_key() + }) + .await + .unwrap(); + assert_eq!(active.kid, new_key.kid); + + // JWKS should contain both keys. + let jwks = tokio::task::spawn_blocking({ + let provider = provider.clone(); + move || provider.jwks() + }) + .await + .unwrap(); + assert!(jwks.keys.iter().any(|jwk| jwk.kid == old_kid)); + assert!(jwks.keys.iter().any(|jwk| jwk.kid == new_key.kid)); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_revocation_store() { + let temp_dir = TempDir::new().unwrap(); + let store = Arc::new(FileRevocationStore::new(temp_dir.path()).await.unwrap()); + + let token_hash = "test-hash-123"; + let store_clone = store.clone(); + let hash_clone = token_hash.to_string(); + let is_revoked = + tokio::task::spawn_blocking(move || store_clone.is_revoked(&hash_clone)).await.unwrap(); + assert!(!is_revoked); + + store.revoke(token_hash, 0).await.unwrap(); + + let store_clone = store.clone(); + let hash_clone = token_hash.to_string(); + let is_revoked = + tokio::task::spawn_blocking(move || store_clone.is_revoked(&hash_clone)).await.unwrap(); + assert!(is_revoked); + + // Reload and verify persistence + let store2 = Arc::new(FileRevocationStore::new(temp_dir.path()).await.unwrap()); + let hash_clone = token_hash.to_string(); + let is_revoked = + tokio::task::spawn_blocking(move || store2.is_revoked(&hash_clone)).await.unwrap(); + assert!(is_revoked); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_token_metadata_store() { + let temp_dir = TempDir::new().unwrap(); + let store = FileTokenMetadataStore::new(temp_dir.path()).await.unwrap(); + + let meta = TokenMetadata { + jti: "test-jti".to_string(), + token_hash: "abc123".to_string(), + 
token_type: super::super::TokenType::Api, + role: Some("admin".to_string()), + label: Some("Test token".to_string()), + created_at: 1_234_567_890, + exp: 1_234_657_890, + revoked: false, + created_by: "bootstrap".to_string(), + }; + + store.store(meta.clone()).await.unwrap(); + assert!(store.exists("test-jti").await); + assert!(!store.exists("nonexistent").await); + + let list = store.list().await.unwrap(); + assert_eq!(list.len(), 1); + assert_eq!(list[0].jti, "test-jti"); + + store.mark_revoked("test-jti").await.unwrap(); + let retrieved = store.get("test-jti").await.unwrap().unwrap(); + assert!(retrieved.revoked); + } +} diff --git a/apps/skit/src/auth/stores/mod.rs b/apps/skit/src/auth/stores/mod.rs new file mode 100644 index 00000000..ece518c1 --- /dev/null +++ b/apps/skit/src/auth/stores/mod.rs @@ -0,0 +1,191 @@ +// SPDX-FileCopyrightText: © 2025 StreamKit Contributors +// +// SPDX-License-Identifier: MPL-2.0 + +//! Auth store traits for pluggable storage backends. +//! +//! This module defines the traits for key management, token revocation, +//! and token metadata storage. The default implementation uses the filesystem, +//! but these traits allow for alternative backends (e.g., Redis) in the future. + +mod file; + +pub use file::{FileKeyProvider, FileRevocationStore, FileTokenMetadataStore}; + +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; + +/// Active signing key material for JWT signing. +#[derive(Clone)] +pub struct SigningKeyMaterial { + /// Key identifier (for JWT `kid` header) + pub kid: String, + /// Ed25519 private key in PKCS#8 DER format (used for EdDSA signing). + pub pkcs8: Arc<[u8]>, +} + +/// Verification key material for JWT validation. +#[derive(Clone)] +pub struct VerificationKeyMaterial { + /// Ed25519 public key bytes (32 bytes, raw). + pub public_key: Arc<[u8]>, +} + +/// Errors that can occur in auth stores. 
+#[derive(Debug, thiserror::Error)] +pub enum AuthStoreError { + #[error("IO error: {0}")] + Io(#[from] std::io::Error), + + #[error("JSON error: {0}")] + Json(#[from] serde_json::Error), + + #[error("Base64 decode error: {0}")] + Base64(#[from] base64::DecodeError), + + #[error("Key not found: {0}")] + #[allow(dead_code)] + KeyNotFound(String), + + #[error("Invalid file permissions on {path}: expected 0600, got {actual:o}")] + InsecurePermissions { path: String, actual: u32 }, + + #[error("Invalid key data: {0}")] + InvalidKey(String), +} + +/// Public JWKS (JSON Web Key Set) served for verifier-only clients. +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct Jwks { + pub keys: Vec, +} + +/// Public JWK for Ed25519 verification. +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct Jwk { + pub kty: String, + pub crv: String, + #[serde(rename = "use")] + pub public_key_use: String, + pub alg: String, + pub kid: String, + pub x: String, +} + +/// Provides signing keys for JWT operations. +/// +/// Implementations must be thread-safe. Key rotation is supported by +/// maintaining multiple keys: one active key for signing new tokens, +/// and older keys for verifying existing tokens until they expire. +#[async_trait] +pub trait KeyProvider: Send + Sync { + /// Get the active signing key. + fn active_key(&self) -> SigningKeyMaterial; + + /// Get a verification key by kid. + /// + /// Returns `None` if the kid is not known (token might be from + /// a different server or the key has been removed). + fn verification_key(&self, kid: &str) -> Option; + + /// Get all valid key IDs (for JWT validation header checks). + fn valid_kids(&self) -> Vec; + + /// Get the public JWKS representing all verification keys. + fn jwks(&self) -> Jwks; + + /// Rotate keys: generate a new active key, keep old keys for verification. + /// + /// Returns the new active key material. 
+ async fn rotate(&self) -> Result; +} + +/// Revocation store for invalidated tokens. +/// +/// The `is_revoked` method MUST be fast (in-memory lookup) as it's called +/// on every authenticated request. Implementations should load revocations +/// into memory at startup and persist changes atomically. +#[async_trait] +pub trait RevocationStore: Send + Sync { + /// Check if a token is revoked by its SHA-256 hash. + /// + /// This method must be fast (in-memory lookup) as it's called on every request. + fn is_revoked(&self, token_hash: &str) -> bool; + + /// Revoke a token by its SHA-256 hash. + /// + /// The `exp` parameter is the token's expiration time, which can be used + /// to automatically clean up expired revocations. + async fn revoke(&self, token_hash: &str, exp: u64) -> Result<(), AuthStoreError>; + + /// Load revocations from persistent storage. + async fn load(&self) -> Result<(), AuthStoreError>; +} + +/// Token type distinguishes API tokens from MoQ tokens. +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "lowercase")] +pub enum TokenType { + Api, + Moq, +} + +impl std::fmt::Display for TokenType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Api => write!(f, "api"), + Self::Moq => write!(f, "moq"), + } + } +} + +/// Metadata about a minted token (stored, never contains raw token). 
+#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct TokenMetadata { + /// Unique token identifier (JWT `jti` claim) + pub jti: String, + /// SHA-256 hash of the token (hex-encoded) + pub token_hash: String, + /// Token type (API or MoQ) + pub token_type: TokenType, + /// Role name (for API tokens) + pub role: Option, + /// Human-readable label + pub label: Option, + /// Creation timestamp (Unix seconds) + pub created_at: u64, + /// Expiration timestamp (Unix seconds) + pub exp: u64, + /// Whether the token has been revoked + pub revoked: bool, + /// Identity that created this token (jti of the parent token, or "bootstrap") + pub created_by: String, +} + +/// Store for token metadata ("tokens we minted"). +/// +/// This store tracks all tokens minted by this server, enabling: +/// - Listing active tokens for admin UI +/// - Enforcing "tokens we mint" policy (reject unknown jtis) +/// - Tracking revocation status alongside other metadata +#[async_trait] +pub trait TokenMetadataStore: Send + Sync { + /// Store metadata when minting a new token. + async fn store(&self, meta: TokenMetadata) -> Result<(), AuthStoreError>; + + /// Check if a jti exists in our store. + /// + /// Used for "tokens we mint" enforcement: reject tokens whose + /// jti is not in our store. + async fn exists(&self, jti: &str) -> bool; + + /// List all tokens (for admin UI). + async fn list(&self) -> Result, AuthStoreError>; + + /// Mark a token as revoked in metadata. + async fn mark_revoked(&self, jti: &str) -> Result<(), AuthStoreError>; + + /// Get metadata for a specific token. 
+ async fn get(&self, jti: &str) -> Result, AuthStoreError>; +} diff --git a/apps/skit/src/cli.rs b/apps/skit/src/cli.rs index 0af8b3aa..42b5da1f 100644 --- a/apps/skit/src/cli.rs +++ b/apps/skit/src/cli.rs @@ -3,7 +3,10 @@ // SPDX-License-Identifier: MPL-2.0 use clap::{Parser, Subcommand}; +use reqwest::header::{HeaderMap, HeaderValue, AUTHORIZATION}; use schemars::schema_for; +use std::net::SocketAddr; +use std::path::{Path, PathBuf}; use tracing::{error, info, warn}; use crate::config; @@ -22,6 +25,24 @@ pub struct Cli { #[arg(short, long, default_value = "skit.toml")] pub config: String, + /// Server base URL for API calls (defaults to the configured bind address) + /// + /// Examples: + /// - `http://127.0.0.1:4545` + /// - `https://demo.streamkit.dev:4545/s/session_abc` + #[arg(long, env = "SKIT_SERVER_URL")] + pub server_url: Option, + + /// API token to authenticate CLI API calls (Bearer token) + /// + /// If not set, StreamKit will try to read `${auth.state_dir}/admin.token` from the config. + #[arg(long, env = "SKIT_TOKEN")] + pub token: Option, + + /// Path to a file containing an API token (Bearer token) + #[arg(long, env = "SKIT_TOKEN_FILE")] + pub token_file: Option, + #[command(subcommand)] pub command: Option, } @@ -33,6 +54,9 @@ pub enum Commands { /// Manage configuration #[command(subcommand)] Config(ConfigCommands), + /// Manage authentication + #[command(subcommand)] + Auth(AuthCommands), } #[derive(Subcommand, Debug)] @@ -43,6 +67,155 @@ pub enum ConfigCommands { Schema, } +#[derive(Subcommand, Debug)] +pub enum AuthCommands { + /// Print the bootstrap admin token path + /// + /// The admin token is automatically generated when auth is first initialized. + /// Use this command to find where the token is stored. + PrintAdminToken { + /// Print only the token (for scripting) + #[arg(long)] + raw: bool, + }, + /// Mint tokens (API or MoQ) and store metadata + /// + /// This is equivalent to creating tokens via the Web UI, and uses the HTTP API. 
+ #[command(subcommand)] + Mint(MintTokenCommands), + /// Rotate the signing key and mint a new admin token + /// + /// This will: + /// 1. Generate a new signing key + /// 2. Keep the old key for validating existing tokens + /// 3. Mint a new admin token signed with the new key + /// 4. Write the new token to the state directory + RotateKey, +} + +#[derive(Subcommand, Debug)] +pub enum MintTokenCommands { + /// Mint an API token (aud: skit-api) + Api { + /// Role name (must exist in [permissions].roles) + #[arg(long)] + role: String, + /// Optional label for UI display + #[arg(long)] + label: Option, + /// TTL in seconds (defaults to auth.api_default_ttl_secs) + #[arg(long)] + ttl_secs: Option, + /// Output as JSON (useful for scripting) + #[arg(long)] + json: bool, + }, + /// Mint a MoQ token (aud: skit-moq) + /// + /// Notes: + /// - `--subscribe ''` / `--publish ''` (empty string) means "allow all" + /// - Omitting the flag entirely means "allow none" + #[cfg(feature = "moq")] + Moq { + /// URL path prefix the token applies to (e.g. 
/session/ or /moq/session1) + #[arg(long)] + root: String, + /// Allowed broadcast prefixes to subscribe to (repeatable) + #[arg(long)] + subscribe: Vec, + /// Allowed broadcast prefixes to publish to (repeatable) + #[arg(long)] + publish: Vec, + /// Optional label for UI display + #[arg(long)] + label: Option, + /// TTL in seconds (defaults to auth.moq_default_ttl_secs) + #[arg(long)] + ttl_secs: Option, + /// Output as JSON (useful for scripting) + #[arg(long)] + json: bool, + }, +} + +fn normalize_base_path_for_url(base_path: Option<&str>) -> String { + let Some(base_path) = base_path else { + return String::new(); + }; + + let trimmed = base_path.trim(); + if trimmed.is_empty() || trimmed == "/" { + return String::new(); + } + + let trimmed = trimmed.trim_end_matches('/'); + if trimmed.starts_with('/') { + trimmed.to_string() + } else { + format!("/{trimmed}") + } +} + +fn hostport_for_client(addr: SocketAddr) -> String { + // Binding to "0.0.0.0"/"[::]" means "all interfaces"; for a local client use localhost. 
+ if addr.ip().is_unspecified() { + return format!("localhost:{}", addr.port()); + } + + if addr.is_ipv6() { + format!("[{}]:{}", addr.ip(), addr.port()) + } else { + format!("{}:{}", addr.ip(), addr.port()) + } +} + +fn default_server_url(config: &config::Config) -> Result { + let addr: SocketAddr = config + .server + .address + .parse() + .map_err(|e| format!("Invalid server.address '{}': {e}", config.server.address))?; + let scheme = if config.server.tls { "https" } else { "http" }; + let hostport = hostport_for_client(addr); + let base_path = normalize_base_path_for_url(config.server.base_path.as_deref()); + Ok(format!("{scheme}://{hostport}{base_path}")) +} + +fn read_token_file(path: &Path) -> Result { + std::fs::read_to_string(path) + .map(|s| s.trim().to_string()) + .map_err(|e| format!("Failed to read token file '{}': {e}", path.display())) +} + +fn resolve_cli_token(cli: &Cli, config: &config::Config) -> Result { + if let Some(token) = cli.token.as_deref() { + let token = token.trim(); + if token.is_empty() { + return Err("Empty --token".to_string()); + } + return Ok(token.to_string()); + } + + if let Some(path) = cli.token_file.as_deref() { + let token = read_token_file(Path::new(path))?; + if token.is_empty() { + return Err(format!("Token file '{path}' is empty")); + } + return Ok(token); + } + + let token_path = PathBuf::from(&config.auth.state_dir).join("admin.token"); + if token_path.exists() { + let token = read_token_file(&token_path)?; + if token.is_empty() { + return Err(format!("Bootstrap token file '{}' is empty", token_path.display())); + } + return Ok(token); + } + + Err("No token provided. 
Pass --token/--token-file (or set SKIT_TOKEN/SKIT_TOKEN_FILE), or run this command on the server host where `${auth.state_dir}/admin.token` is readable.".to_string()) +} + /// Initialize telemetry (metrics) if enabled in configuration /// Returns the meter provider that must be kept alive #[allow(clippy::collection_is_never_read)] // Meter provider must be kept alive @@ -150,6 +323,310 @@ fn handle_config_schema_command() { } } +/// Handle the "auth print-admin-token" command +// Allow println/eprintln for CLI output (intentional) +#[allow(clippy::disallowed_macros)] +fn handle_auth_print_admin_token(config_path: &str) { + let config_result = match config::load(config_path) { + Ok(result) => result, + Err(e) => { + eprintln!("Failed to load configuration: {e}"); + std::process::exit(1); + }, + }; + + let state_dir = std::path::Path::new(&config_result.config.auth.state_dir); + let token_path = state_dir.join("admin.token"); + + if token_path.exists() { + println!("Admin token location: {}", token_path.display()); + println!(); + // Try to read and print the token + match std::fs::read_to_string(&token_path) { + Ok(token) => { + println!("Token: {}", token.trim()); + }, + Err(e) => { + eprintln!("Warning: Could not read token file: {e}"); + eprintln!("The file exists but may have restricted permissions."); + }, + } + } else { + eprintln!("Admin token not found at: {}", token_path.display()); + eprintln!(); + eprintln!("The admin token is created when auth is first initialized."); + eprintln!("Start the server with auth enabled to generate it:"); + eprintln!(" - Bind to a non-loopback address (auth.mode=auto)"); + eprintln!(" - Or set auth.mode=enabled in your config"); + std::process::exit(1); + } +} + +/// Handle the "auth print-admin-token --raw" command +// Allow println/eprintln for CLI output (intentional) +#[allow(clippy::disallowed_macros)] +fn handle_auth_print_admin_token_raw(config_path: &str) { + let config_result = match config::load(config_path) { + 
Ok(result) => result, + Err(e) => { + eprintln!("Failed to load configuration: {e}"); + std::process::exit(1); + }, + }; + + let state_dir = std::path::Path::new(&config_result.config.auth.state_dir); + let token_path = state_dir.join("admin.token"); + + if !token_path.exists() { + eprintln!("Admin token not found at: {}", token_path.display()); + std::process::exit(1); + } + + match std::fs::read_to_string(&token_path) { + Ok(token) => println!("{}", token.trim()), + Err(e) => { + eprintln!("Failed to read token file: {e}"); + std::process::exit(1); + }, + } +} + +/// Handle the "auth rotate-key" command +// Allow println/eprintln for CLI output (intentional) +#[allow(clippy::disallowed_macros)] +async fn handle_auth_rotate_key(config_path: &str) { + let config_result = match config::load(config_path) { + Ok(result) => result, + Err(e) => { + eprintln!("Failed to load configuration: {e}"); + std::process::exit(1); + }, + }; + + // Initialize auth state (this will load existing keys) + let auth_state = match crate::auth::AuthState::new(&config_result.config.auth, true).await { + Ok(state) => state, + Err(e) => { + eprintln!("Failed to initialize auth: {e}"); + eprintln!(); + eprintln!("Make sure auth has been initialized by starting the server first."); + std::process::exit(1); + }, + }; + + // Rotate the key + match auth_state.rotate_key().await { + Ok(key_material) => { + println!("Key rotated successfully!"); + println!("New key ID: {}", key_material.kid); + println!(); + + // Mint a new admin token with the new key + match auth_state + .mint_api_token( + "admin", + Some("bootstrap-admin"), + config_result.config.auth.api_max_ttl_secs, + "cli-rotate-key", + ) + .await + { + Ok((token, _meta)) => { + // Write the new admin token + let state_dir = std::path::Path::new(&config_result.config.auth.state_dir); + let token_path = state_dir.join("admin.token"); + + match crate::auth::FileKeyProvider::write_secure(&token_path, &token).await { + Ok(()) => { + 
println!("New admin token written to: {}", token_path.display()); + println!(); + println!("Token: {token}"); + }, + Err(e) => { + eprintln!("Warning: Could not write token file: {e}"); + eprintln!("New admin token: {token}"); + }, + } + }, + Err(e) => { + eprintln!("Failed to mint new admin token: {e}"); + std::process::exit(1); + }, + } + }, + Err(e) => { + eprintln!("Failed to rotate key: {e}"); + std::process::exit(1); + }, + } +} + +/// Handle the "auth mint api/moq" commands +// Allow println/eprintln for CLI output (intentional) +#[allow(clippy::disallowed_macros)] +async fn handle_auth_mint_token(cli: &Cli, cmd: &MintTokenCommands) { + let config_result = match config::load(&cli.config) { + Ok(result) => result, + Err(e) => { + eprintln!("Failed to load configuration: {e}"); + std::process::exit(1); + }, + }; + + let server_url = match cli.server_url.as_deref() { + Some(url) => url.trim().trim_end_matches('/').to_string(), + None => match default_server_url(&config_result.config) { + Ok(url) => url, + Err(e) => { + eprintln!("{e}"); + std::process::exit(1); + }, + }, + }; + + let token = match resolve_cli_token(cli, &config_result.config) { + Ok(token) => token, + Err(e) => { + eprintln!("{e}"); + std::process::exit(1); + }, + }; + + let mut headers = HeaderMap::new(); + let Ok(auth_value) = HeaderValue::from_str(&format!("Bearer {token}")) else { + eprintln!("Invalid token (contains illegal header characters)"); + std::process::exit(1); + }; + headers.insert(AUTHORIZATION, auth_value); + + let client = reqwest::Client::new(); + + match cmd { + MintTokenCommands::Api { role, label, ttl_secs, json } => { + let body = crate::auth::handlers::CreateApiTokenRequest { + role: role.clone(), + label: label.clone().and_then(|s| { + let t = s.trim().to_string(); + if t.is_empty() { + None + } else { + Some(t) + } + }), + ttl_secs: *ttl_secs, + }; + + let url = format!("{server_url}/api/v1/auth/tokens"); + let resp = match 
client.post(&url).headers(headers.clone()).json(&body).send().await { + Ok(r) => r, + Err(e) => { + eprintln!("Failed to reach server at '{server_url}': {e}"); + std::process::exit(1); + }, + }; + + if !resp.status().is_success() { + let status = resp.status(); + let text = resp.text().await.unwrap_or_default(); + eprintln!("Token mint failed ({status}): {text}"); + std::process::exit(1); + } + + let out: crate::auth::handlers::CreateTokenResponse = match resp.json().await { + Ok(v) => v, + Err(e) => { + eprintln!("Failed to parse response JSON: {e}"); + std::process::exit(1); + }, + }; + + if *json { + match serde_json::to_string_pretty(&out) { + Ok(s) => println!("{s}"), + Err(e) => { + eprintln!("Failed to serialize JSON: {e}"); + std::process::exit(1); + }, + } + } else { + println!("Token: {}", out.token); + println!("jti: {}", out.jti); + println!("exp: {}", out.exp); + } + }, + #[cfg(feature = "moq")] + MintTokenCommands::Moq { root, subscribe, publish, label, ttl_secs, json } => { + let root_trimmed = root.trim(); + if root_trimmed.is_empty() { + eprintln!("Missing --root (use '/' to allow any path)"); + std::process::exit(1); + } + + let normalized_root = if root_trimmed.starts_with('/') { + root_trimmed.to_string() + } else { + format!("/{root_trimmed}") + }; + + let body = crate::auth::handlers::CreateMoqTokenRequest { + root: normalized_root, + subscribe: subscribe.clone(), + publish: publish.clone(), + label: label.clone().and_then(|s| { + let t = s.trim().to_string(); + if t.is_empty() { + None + } else { + Some(t) + } + }), + ttl_secs: *ttl_secs, + }; + + let url = format!("{server_url}/api/v1/auth/moq-tokens"); + let resp = match client.post(&url).headers(headers.clone()).json(&body).send().await { + Ok(r) => r, + Err(e) => { + eprintln!("Failed to reach server at '{server_url}': {e}"); + std::process::exit(1); + }, + }; + + if !resp.status().is_success() { + let status = resp.status(); + let text = resp.text().await.unwrap_or_default(); + 
eprintln!("MoQ token mint failed ({status}): {text}"); + std::process::exit(1); + } + + let out: crate::auth::handlers::CreateTokenResponse = match resp.json().await { + Ok(v) => v, + Err(e) => { + eprintln!("Failed to parse response JSON: {e}"); + std::process::exit(1); + }, + }; + + if *json { + match serde_json::to_string_pretty(&out) { + Ok(s) => println!("{s}"), + Err(e) => { + eprintln!("Failed to serialize JSON: {e}"); + std::process::exit(1); + }, + } + } else { + println!("Token: {}", out.token); + println!("jti: {}", out.jti); + println!("exp: {}", out.exp); + if let Some(url) = out.url_template.as_deref() { + println!("url: {url}"); + } + } + }, + } +} + /// Handle CLI commands // Allow eprintln/println before logging is initialized (for CLI output) #[allow(clippy::disallowed_macros)] @@ -164,5 +641,18 @@ pub async fn handle_command(cli: &Cli, init_logging: LogInitFn) { Commands::Config(ConfigCommands::Schema) => { handle_config_schema_command(); }, + Commands::Auth(AuthCommands::PrintAdminToken { raw }) => { + if *raw { + handle_auth_print_admin_token_raw(&cli.config); + } else { + handle_auth_print_admin_token(&cli.config); + } + }, + Commands::Auth(AuthCommands::Mint(cmd)) => { + handle_auth_mint_token(cli, cmd).await; + }, + Commands::Auth(AuthCommands::RotateKey) => { + handle_auth_rotate_key(&cli.config).await; + }, } } diff --git a/apps/skit/src/config.rs b/apps/skit/src/config.rs index c7ead7f9..4766088d 100644 --- a/apps/skit/src/config.rs +++ b/apps/skit/src/config.rs @@ -509,6 +509,89 @@ impl Default for ScriptConfig { } } +/// Authentication mode for the server. 
+#[derive(Deserialize, Serialize, Debug, Clone, Copy, Default, JsonSchema)] +#[serde(rename_all = "lowercase")] +pub enum AuthMode { + /// Auto: disabled on loopback, enabled on non-loopback + #[default] + Auto, + /// Always require authentication + Enabled, + /// Disable authentication entirely (NOT recommended for production) + Disabled, +} + +fn default_auth_state_dir() -> String { + ".streamkit/auth".to_string() +} + +fn default_auth_cookie_name() -> String { + "skit_session".to_string() +} + +const fn default_api_default_ttl() -> u64 { + 86400 // 24 hours +} + +const fn default_api_max_ttl() -> u64 { + 2_592_000 // 30 days +} + +const fn default_moq_default_ttl() -> u64 { + 3600 // 1 hour +} + +const fn default_moq_max_ttl() -> u64 { + 86400 // 1 day +} + +/// Authentication configuration for built-in JWT-based auth. +#[derive(Deserialize, Serialize, Debug, Clone, JsonSchema)] +pub struct AuthConfig { + /// Authentication mode (auto, enabled, disabled) + #[serde(default)] + pub mode: AuthMode, + + /// Directory for auth state (keys, tokens). Default: ".streamkit/auth" + #[serde(default = "default_auth_state_dir")] + pub state_dir: String, + + /// Cookie name for browser sessions. Default: "skit_session" + #[serde(default = "default_auth_cookie_name")] + pub cookie_name: String, + + /// Default TTL for API tokens in seconds. Default: 86400 (24 hours) + #[serde(default = "default_api_default_ttl")] + pub api_default_ttl_secs: u64, + + /// Maximum TTL for API tokens in seconds. Default: 2592000 (30 days) + #[serde(default = "default_api_max_ttl")] + pub api_max_ttl_secs: u64, + + /// Default TTL for MoQ tokens in seconds. Default: 3600 (1 hour) + #[serde(default = "default_moq_default_ttl")] + pub moq_default_ttl_secs: u64, + + /// Maximum TTL for MoQ tokens in seconds. 
Default: 86400 (1 day) + #[serde(default = "default_moq_max_ttl")] + pub moq_max_ttl_secs: u64, +} + +impl Default for AuthConfig { + fn default() -> Self { + Self { + mode: AuthMode::default(), + state_dir: default_auth_state_dir(), + cookie_name: default_auth_cookie_name(), + api_default_ttl_secs: default_api_default_ttl(), + api_max_ttl_secs: default_api_max_ttl(), + moq_default_ttl_secs: default_moq_default_ttl(), + moq_max_ttl_secs: default_moq_max_ttl(), + } + } +} + fn default_allowed_file_paths() -> Vec { vec!["samples/**".to_string()] } @@ -577,6 +660,9 @@ pub struct Config { #[serde(default)] pub script: ScriptConfig, + + #[serde(default)] + pub auth: AuthConfig, } #[derive(Debug)] diff --git a/apps/skit/src/lib.rs b/apps/skit/src/lib.rs index 838c15dd..9a711635 100644 --- a/apps/skit/src/lib.rs +++ b/apps/skit/src/lib.rs @@ -3,6 +3,7 @@ // SPDX-License-Identifier: MPL-2.0 pub mod assets; +pub mod auth; pub mod cli; pub mod config; pub mod file_security; diff --git a/apps/skit/src/main.rs b/apps/skit/src/main.rs index 968ee20b..99f12324 100644 --- a/apps/skit/src/main.rs +++ b/apps/skit/src/main.rs @@ -31,6 +31,7 @@ static ALLOC: dhat::Alloc = dhat::Alloc; use clap::Parser; mod assets; +mod auth; mod cli; mod config; mod file_security; diff --git a/apps/skit/src/moq_gateway.rs b/apps/skit/src/moq_gateway.rs index 2ed1eb59..a81a78cf 100644 --- a/apps/skit/src/moq_gateway.rs +++ b/apps/skit/src/moq_gateway.rs @@ -68,6 +68,11 @@ impl MoqGateway { /// Handle an incoming WebTransport connection by routing it to the appropriate node /// + /// # Arguments + /// * `session` - The WebTransport session + /// * `path` - The URL path from the connection + /// * `auth` - Optional auth context (None when auth is disabled) + /// /// # Errors /// /// Returns an error if: @@ -81,6 +86,7 @@ impl MoqGateway { &self, session: moq_native::web_transport_quinn::Session, path: String, + auth: Option>, ) -> Result<(), String> { debug!(path = %path, "Received WebTransport 
connection"); @@ -120,7 +126,8 @@ impl MoqGateway { // Type-erase the WebTransport session let session_boxed: streamkit_core::moq_gateway::WebTransportSession = Box::new(session); - let conn = MoqConnection { path: path.clone(), session: session_boxed, response_tx }; + let conn = + MoqConnection { path: path.clone(), session: session_boxed, response_tx, auth }; // Send connection to the node if connection_tx.send(conn).is_err() { diff --git a/apps/skit/src/permissions.rs b/apps/skit/src/permissions.rs index 4eb8fd97..108f6b15 100644 --- a/apps/skit/src/permissions.rs +++ b/apps/skit/src/permissions.rs @@ -198,6 +198,47 @@ impl Permissions { } } + /// Create viewer role permissions (read-only access) + /// + /// Viewers can list and read sessions, nodes, and samples, but cannot + /// create, modify, or delete anything. This role is useful for monitoring + /// dashboards or read-only API access. + pub fn viewer() -> Self { + Self { + // Read-only: can view but not mutate + create_sessions: false, + destroy_sessions: false, + list_sessions: true, + modify_sessions: false, + tune_nodes: false, + load_plugins: false, + delete_plugins: false, + list_nodes: true, + list_samples: true, + read_samples: true, + write_samples: false, + delete_samples: false, + allowed_samples: vec![ + // Viewers can read standard samples + "oneshot/*.yml".to_string(), + "oneshot/*.yaml".to_string(), + "dynamic/*.yml".to_string(), + "dynamic/*.yaml".to_string(), + "user/*.yml".to_string(), + "user/*.yaml".to_string(), + ], + allowed_nodes: vec!["*".to_string()], // Can see all node types + allowed_plugins: vec!["*".to_string()], // Can see all plugins + access_all_sessions: false, // Can only view own sessions + upload_assets: false, + delete_assets: false, + allowed_assets: vec![ + // Viewers can see system audio assets + "samples/audio/system/*".to_string(), + ], + } + } + /// Convert to API PermissionsInfo (without allowlists) pub const fn to_info(&self) -> PermissionsInfo { PermissionsInfo 
{ @@ -279,12 +320,13 @@ impl Permissions { /// Permission configuration section for skit.toml. #[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] pub struct PermissionsConfig { - /// Default role for unauthenticated requests + /// Default role for requests without an authenticated role /// - /// Note: StreamKit does not implement authentication by itself; this value becomes the - /// effective role for any request that is not assigned a role by an external auth layer. - /// For production deployments, set this to a least-privileged role and put an auth layer - /// (or reverse proxy) in front of the server. + /// When built-in auth is disabled, this becomes the effective role for requests that are not + /// assigned a role via a trusted role header or `SK_ROLE`. + /// + /// For production deployments, prefer enabling built-in auth (`[auth].mode`) or running behind + /// an authenticating reverse proxy that sets `[permissions].role_header`. #[serde(default = "default_default_role")] pub default_role: String, @@ -298,11 +340,13 @@ pub struct PermissionsConfig { #[serde(default)] pub role_header: Option, - /// Allow starting the server on a non-loopback address without a trusted role header. + /// Allow starting the server on a non-loopback address without built-in auth or a trusted role + /// header. + /// + /// This only applies when built-in auth is disabled. /// - /// StreamKit does not implement authentication; without `role_header`, all requests fall back to - /// `SK_ROLE`/`default_role`. Binding to a non-loopback address without a trusted auth layer is - /// unsafe and the server will refuse to start unless this flag is set. + /// This is unsafe: all requests fall back to `SK_ROLE`/`default_role`. The server refuses to + /// start in this configuration unless this flag is set. 
#[serde(default)] pub allow_insecure_no_auth: bool, @@ -342,6 +386,7 @@ fn default_roles() -> HashMap { let mut roles = HashMap::new(); roles.insert("admin".to_string(), Permissions::admin()); roles.insert("user".to_string(), Permissions::user()); + roles.insert("viewer".to_string(), Permissions::viewer()); roles } diff --git a/apps/skit/src/server.rs b/apps/skit/src/server.rs index 8188ef28..dae49147 100644 --- a/apps/skit/src/server.rs +++ b/apps/skit/src/server.rs @@ -28,7 +28,7 @@ use std::time::Instant; use tower::limit::ConcurrencyLimitLayer; use tower::ServiceBuilder; use tower_http::{ - cors::{AllowOrigin, Any, CorsLayer}, + cors::{AllowHeaders, AllowOrigin, CorsLayer}, set_header::SetResponseHeaderLayer, trace::{DefaultOnFailure, DefaultOnResponse, TraceLayer}, }; @@ -92,6 +92,22 @@ async fn health_handler() -> impl IntoResponse { })) } +/// Serve the public JWKS (JSON Web Key Set) for verifying StreamKit-issued JWTs. +/// +/// Exposed at `/.well-known/jwks.json` when built-in auth is enabled. 
+async fn jwks_handler(State(app_state): State>) -> Response { + if !app_state.auth.is_enabled() { + return StatusCode::NOT_FOUND.into_response(); + } + + let Some(key_provider) = app_state.auth.key_provider() else { + return (StatusCode::SERVICE_UNAVAILABLE, "Auth key provider not available".to_string()) + .into_response(); + }; + + Json(key_provider.jwks()).into_response() +} + /// Type alias for a boxed byte stream used in media processing type MediaStream = Box> + Unpin + Send>; @@ -155,12 +171,10 @@ fn escape_html_attr(value: &str) -> String { out } -fn normalized_base_path_for_html(app_state: &AppState) -> Option { - app_state - .config - .server - .base_path - .as_deref() +const BUILTIN_AUTH_ROLE_HEADER: &str = "x-streamkit-role"; + +fn normalize_base_path(base_path: Option<&str>) -> Option { + base_path .map(str::trim) .and_then(|p| if p.is_empty() { None } else { Some(p) }) .map(|p| p.trim_end_matches('/')) @@ -168,6 +182,108 @@ fn normalized_base_path_for_html(app_state: &AppState) -> Option { .map(|p| if p.starts_with('/') { p.to_string() } else { format!("/{p}") }) } +fn normalized_base_path_for_html(app_state: &AppState) -> String { + normalize_base_path(app_state.config.server.base_path.as_deref()).unwrap_or_default() +} + +fn strip_base_path_prefix<'a>(path: &'a str, base_path: Option<&str>) -> &'a str { + let Some(base_path) = base_path else { + return path; + }; + + let base_path = base_path.trim().trim_end_matches('/'); + if base_path.is_empty() || base_path == "/" { + return path; + } + + // Normalize matching: config may specify base_path with or without a leading '/'. + if base_path.starts_with('/') { + let Some(rest) = path.strip_prefix(base_path) else { + return path; + }; + + if rest.is_empty() { + return "/"; + } + + // Only treat this as a base_path prefix if it ends on a boundary ("/" or exact match). 
+ if rest.starts_with('/') { + return rest; + } + + return path; + } + + // base_path without leading slash: match against path after the initial '/' + if !path.starts_with('/') { + return path; + } + + let Some(rest) = path[1..].strip_prefix(base_path) else { + return path; + }; + + if rest.is_empty() { + return "/"; + } + + // Only treat this as a base_path prefix if it ends on a boundary ("/" or exact match). + if rest.starts_with('/') { + rest + } else { + path + } +} + +async fn auth_guard_middleware( + State(app_state): State>, + mut req: axum::http::Request, + next: Next, +) -> Response { + if !app_state.auth.is_enabled() { + return next.run(req).await; + } + + let raw_path = req.uri().path(); + let path = strip_base_path_prefix(raw_path, app_state.config.server.base_path.as_deref()); + + // Only guard API routes; static assets (UI) stay public and handle auth via /login. + if !path.starts_with("/api/") { + return next.run(req).await; + } + + // Auth endpoints handle their own auth semantics (login/me/logout). + if path.starts_with("/api/v1/auth/") { + return next.run(req).await; + } + + let auth_ctx = match crate::auth::validate_token_from_headers( + req.headers(), + &app_state.auth, + &app_state.config, + &app_state.config.permissions, + ) + .await + { + Ok(ctx) => ctx, + Err((status, msg)) => return (status, msg).into_response(), + }; + + // Inject the role into a trusted header so existing handlers can use RBAC without refactors. + // + // SECURITY: Always overwrite any incoming header of the same name. + let Ok(role_value) = header::HeaderValue::from_str(&auth_ctx.role) else { + return (StatusCode::INTERNAL_SERVER_ERROR, "Invalid role in token".to_string()) + .into_response(); + }; + // Header name is static and guaranteed to be valid. 
+ #[allow(clippy::expect_used)] + let header_name = header::HeaderName::from_static(BUILTIN_AUTH_ROLE_HEADER); + req.headers_mut().insert(header_name, role_value); + + next.run(req).await +} + /// Best-effort Origin enforcement for browser security. /// /// This is NOT authentication. It is a defense-in-depth measure that mitigates @@ -185,7 +301,8 @@ async fn origin_guard_middleware( ) -> Response { use axum::http::Method; - let path = req.uri().path(); + let raw_path = req.uri().path(); + let path = strip_base_path_prefix(raw_path, app_state.config.server.base_path.as_deref()); let method = req.method().clone(); let is_api = path.starts_with("/api/"); @@ -220,19 +337,44 @@ async fn origin_guard_middleware( next.run(req).await } -fn create_cors_layer(config: &crate::config::CorsConfig) -> CorsLayer { +fn create_cors_layer( + config: &crate::config::CorsConfig, + auth_enabled: bool, +) -> Result { use axum::http::{HeaderValue, Method}; - // Check for wildcard (allow all) - if config.allowed_origins.iter().any(|o| o == "*") { - info!("CORS configured to allow all origins (permissive mode)"); - return CorsLayer::permissive(); + let has_wildcard = config.allowed_origins.iter().any(|o| o == "*"); + + // CRITICAL: Wildcard origins not allowed with credentials (browsers reject this) + if auth_enabled && has_wildcard { + return Err( + "CORS allowed_origins='*' is incompatible with auth (cookies require explicit origins). \ + Set allowed_origins to specific origins or disable auth.".to_string() + ); + } + + if has_wildcard { + info!("CORS configured to allow all origins (reflect Origin header)"); + // When credentials are enabled, `Access-Control-Allow-Origin: *` is invalid. + // Mirror the request origin instead. 
+ return Ok(CorsLayer::new() + .allow_origin(AllowOrigin::mirror_request()) + .allow_methods([ + Method::GET, + Method::POST, + Method::PUT, + Method::DELETE, + Method::OPTIONS, + Method::PATCH, + ]) + .allow_headers(AllowHeaders::mirror_request()) + .allow_credentials(true)); } // If no origins specified, use default restrictive behavior if config.allowed_origins.is_empty() { info!("CORS configured with no allowed origins (most restrictive)"); - return CorsLayer::new(); + return Ok(CorsLayer::new()); } // Build list of patterns for matching @@ -240,6 +382,7 @@ fn create_cors_layer(config: &crate::config::CorsConfig) -> CorsLayer { info!( allowed_origins = ?patterns, + auth_enabled, "CORS configured with origin allowlist" ); @@ -252,7 +395,7 @@ fn create_cors_layer(config: &crate::config::CorsConfig) -> CorsLayer { patterns.iter().any(|pattern| origin_matches_pattern(origin_str, pattern)) }); - CorsLayer::new() + let mut layer = CorsLayer::new() .allow_origin(allow_origin) .allow_methods([ Method::GET, @@ -262,13 +405,36 @@ fn create_cors_layer(config: &crate::config::CorsConfig) -> CorsLayer { Method::OPTIONS, Method::PATCH, ]) - .allow_headers(Any) - .expose_headers(Any) + // When credentials are enabled, wildcard headers (`*`) are invalid. Mirror the + // preflight request headers instead. + .allow_headers(AllowHeaders::mirror_request()); + + // Enable credentials for browser clients. + // + // NOTE: The UI always uses `credentials: 'include'` so cookie auth works without query params. + // In dev (Vite), the UI is served cross-origin and talks directly to the backend, so CORS must + // allow credentials even when auth is disabled (otherwise browsers will block responses). 
+ layer = layer.allow_credentials(true); + + Ok(layer) +} + +fn cors_allowed_origins_are_loopback_only(origins: &[String]) -> bool { + if origins.is_empty() { + return false; + } + + origins.iter().all(|pattern| { + origin_matches_pattern("http://localhost:80", pattern) + || origin_matches_pattern("http://127.0.0.1:80", pattern) + || origin_matches_pattern("https://localhost:443", pattern) + || origin_matches_pattern("https://127.0.0.1:443", pattern) + }) } #[cfg(test)] mod cors_tests { - use super::origin_matches_pattern; + use super::{create_cors_layer, origin_matches_pattern}; #[test] fn cors_wildcard_port_matches_localhost_port_only() { @@ -287,6 +453,18 @@ mod cors_tests { assert!(!origin_matches_pattern("https://example.com:443", "https://example.com")); assert!(!origin_matches_pattern("https://example.com", "https://example.com:*")); } + + #[test] + #[allow(clippy::unwrap_used)] + fn cors_layer_does_not_panic_when_credentials_enabled() { + let cors_config = crate::config::CorsConfig::default(); + let layer = create_cors_layer(&cors_config, false).unwrap(); + + // `CorsLayer` validates its configuration when layered; this should not panic. + let _app = axum::Router::<()>::new() + .route("/", axum::routing::get(|| async { "ok" })) + .layer(layer); + } } // File path validation lives in `crate::file_security` so it can be reused by both @@ -388,16 +566,39 @@ struct FrontendConfig { } /// Axum handler to get frontend configuration -async fn get_config_handler(State(app_state): State>) -> impl IntoResponse { - #[cfg(not(feature = "moq"))] - let _ = &app_state; +/// +/// Viewer role is denied - they cannot access server configuration. 
+async fn get_config_handler( + State(app_state): State>, + headers: HeaderMap, +) -> Result { + // Check auth and deny viewers + if app_state.auth.is_enabled() { + let auth_ctx = crate::auth::validate_token_from_headers( + &headers, + &app_state.auth, + &app_state.config, + &app_state.config.permissions, + ) + .await?; + + if auth_ctx.role == "viewer" { + return Err((StatusCode::FORBIDDEN, "Viewers cannot access config".to_string())); + } + } else { + // Auth disabled - still check role for viewer restriction + let (role, _) = crate::role_extractor::get_role_and_permissions(&headers, &app_state); + if role == "viewer" { + return Err((StatusCode::FORBIDDEN, "Viewers cannot access config".to_string())); + } + } let config = FrontendConfig { #[cfg(feature = "moq")] moq_gateway_url: app_state.config.server.moq_gateway_url.clone(), }; - Json(config) + Ok(Json(config)) } async fn list_plugins_handler( @@ -591,17 +792,41 @@ async fn list_packet_types_handler() -> impl IntoResponse { } /// Axum handler to get MoQ WebTransport certificate fingerprints +/// +/// Viewer role is denied - fingerprints are sensitive for MoQ connections. 
#[cfg(feature = "moq")] async fn get_moq_fingerprints_handler( State(app_state): State>, -) -> Result { + headers: HeaderMap, +) -> Result { + // Check auth and deny viewers + if app_state.auth.is_enabled() { + let auth_ctx = crate::auth::validate_token_from_headers( + &headers, + &app_state.auth, + &app_state.config, + &app_state.config.permissions, + ) + .await?; + + if auth_ctx.role == "viewer" { + return Err((StatusCode::FORBIDDEN, "Viewers cannot access fingerprints".to_string())); + } + } else { + // Auth disabled - still check role for viewer restriction + let (role, _) = crate::role_extractor::get_role_and_permissions(&headers, &app_state); + if role == "viewer" { + return Err((StatusCode::FORBIDDEN, "Viewers cannot access fingerprints".to_string())); + } + } + if let Some(gateway) = &app_state.moq_gateway { let fingerprints = gateway.get_fingerprints().await; Ok(Json(serde_json::json!({ "fingerprints": fingerprints }))) } else { - Err(StatusCode::SERVICE_UNAVAILABLE) + Err((StatusCode::SERVICE_UNAVAILABLE, "MoQ gateway not available".to_string())) } } @@ -1465,9 +1690,9 @@ async fn process_oneshot_pipeline_handler( // Enforce role-based access control for oneshot execution. // - // StreamKit does not implement authentication, but it does implement RBAC. - // Even for local demos, enforce the configured role/permissions so deployments - // can run safely behind a reverse proxy or other auth layer. + // Enforce RBAC for oneshot execution. When built-in auth is enabled, the request is first + // authenticated by `auth_guard_middleware`, which injects the resolved role into a trusted + // header so existing handlers can apply RBAC without refactors. 
let headers = req.headers().clone(); let (role_name, perms) = crate::role_extractor::get_role_and_permissions(&headers, &app_state); if !perms.create_sessions { @@ -1605,8 +1830,22 @@ async fn websocket_handler( } } - // Extract role name and permissions from headers - let (role_name, perms) = crate::role_extractor::get_role_and_permissions(&headers, &app_state); + // Require auth when enabled (cookie or Authorization header) + let (role_name, perms) = if app_state.auth.is_enabled() { + match crate::auth::validate_token_from_headers( + &headers, + &app_state.auth, + &app_state.config, + &app_state.config.permissions, + ) + .await + { + Ok(ctx) => (ctx.role, ctx.permissions), + Err((status, msg)) => return (status, msg).into_response(), + } + } else { + crate::role_extractor::get_role_and_permissions(&headers, &app_state) + }; ws.on_upgrade(move |socket| websocket::handle_websocket(socket, app_state, perms, role_name)) } @@ -1615,11 +1854,13 @@ async fn static_handler( State(app_state): State>, ) -> impl IntoResponse { let raw_path = uri.path(); - if raw_path.starts_with("/api/") { + let stripped_path = + strip_base_path_prefix(raw_path, app_state.config.server.base_path.as_deref()); + if stripped_path.starts_with("/api/") { return StatusCode::NOT_FOUND.into_response(); } - let path = raw_path.trim_start_matches('/'); + let path = stripped_path.trim_start_matches('/'); // If path is empty, serve index.html let path = if path.is_empty() { "index.html" } else { path }; @@ -1639,15 +1880,19 @@ async fn static_handler( if path == "index.html" { "no-cache" } else { "public, max-age=31536000, immutable" }; headers.insert(header::CACHE_CONTROL, axum::http::HeaderValue::from_static(cache_control)); - // Inject tag into index.html if base_path is configured + // Inject a tag into index.html for SPA routing. + // + // Vite builds use relative asset URLs (e.g. `./assets/...`). Without a `` tag, those + // URLs resolve relative to the current route (e.g. 
`/admin/assets/...`), which breaks when + // users deep-link to multi-segment routes like `/admin/tokens`. Injecting `` + // (or ``) fixes this. if path == "index.html" { - if let Some(base_path) = normalized_base_path_for_html(app_state.as_ref()) { - let base_path = escape_html_attr(&base_path); - let html = String::from_utf8_lossy(&content.data); - let injected = - html.replace("", &format!("\n ")); - return (headers, injected.into_bytes()).into_response(); - } + let base_path = normalized_base_path_for_html(app_state.as_ref()); + let base_path = escape_html_attr(&base_path); + let html = String::from_utf8_lossy(&content.data); + let injected = + html.replace("", &format!("\n ")); + return (headers, injected.into_bytes()).into_response(); } (headers, content.data).into_response() @@ -1673,16 +1918,12 @@ async fn static_handler( ); headers.insert(header::CACHE_CONTROL, axum::http::HeaderValue::from_static("no-cache")); - // Inject tag if base_path is configured - if let Some(base_path) = normalized_base_path_for_html(app_state.as_ref()) { - let base_path = escape_html_attr(&base_path); - let html = String::from_utf8_lossy(&content.data); - let injected = - html.replace("", &format!("\n ")); - return (headers, injected.into_bytes()).into_response(); - } - - (headers, content.data).into_response() + let base_path = normalized_base_path_for_html(app_state.as_ref()); + let base_path = escape_html_attr(&base_path); + let html = String::from_utf8_lossy(&content.data); + let injected = + html.replace("", &format!("\n ")); + (headers, injected.into_bytes()).into_response() } else { error!("FATAL: index.html not found in embedded assets!"); (StatusCode::INTERNAL_SERVER_ERROR, "index.html not found").into_response() @@ -1728,15 +1969,25 @@ async fn metrics_middleware(req: axum::http::Request, next: Next) -> Respo /// Creates the Axum application with all routes and middleware. 
/// +/// # Arguments +/// +/// * `config` - The server configuration +/// * `auth` - Optional pre-initialized AuthState. If None, creates a disabled auth state. +/// /// # Panics /// /// Panics if the plugin manager fails to initialize. This can happen if: /// - Plugin directories cannot be created due to filesystem permissions /// - Plugin directories exist but are not accessible +/// - CORS configuration is invalid (wildcard with auth enabled) /// /// Since this occurs during application initialization, a panic here is acceptable -/// as the server cannot function without plugin support. -pub fn create_app(config: Config) -> (Router, Arc) { +/// as the server cannot function without proper configuration. +#[allow(clippy::expect_used)] +pub fn create_app( + mut config: Config, + auth: Option>, +) -> (Router, Arc) { // --- Create the shared application state --- let (event_tx, _) = tokio::sync::broadcast::channel(128); @@ -1838,12 +2089,23 @@ pub fn create_app(config: Config) -> (Router, Arc) { Some(gateway) }; + // Use provided auth state or create disabled auth + let auth = auth.unwrap_or_else(|| Arc::new(crate::auth::AuthState::disabled())); + + // When built-in auth is enabled, treat the injected role header as the trusted role source. + // + // SECURITY: This header is overwritten by `auth_guard_middleware` for every API request. 
+ if auth.is_enabled() { + config.permissions.role_header = Some(BUILTIN_AUTH_ROLE_HEADER.to_string()); + } + let app_state = Arc::new(AppState { engine, session_manager: Arc::new(tokio::sync::Mutex::new(SessionManager::default())), config: Arc::new(config), event_tx, plugin_manager, + auth, #[cfg(feature = "moq")] moq_gateway, }); @@ -1859,6 +2121,7 @@ pub fn create_app(config: Config) -> (Router, Arc) { let mut router = Router::new() .route("/healthz", get(health_handler)) .route("/health", get(health_handler)) + .route("/.well-known/jwks.json", get(jwks_handler)) .route("/api/v1/process", oneshot_route) .route( "/api/v1/plugins", @@ -1912,7 +2175,13 @@ pub fn create_app(config: Config) -> (Router, Arc) { router = router.route("/certificate.sha256", get(get_certificate_sha256_handler)); } - let cors_layer = create_cors_layer(&app_state.config.server.cors); + // Add auth routes + router = router.nest("/api/v1/auth", crate::auth::auth_router()); + + // Configure CORS with auth enabled state + let auth_enabled = app_state.auth.is_enabled(); + let cors_layer = create_cors_layer(&app_state.config.server.cors, auth_enabled) + .expect("CORS configuration error"); let router = router.fallback(static_handler); @@ -1938,6 +2207,7 @@ pub fn create_app(config: Config) -> (Router, Arc) { let router = router .with_state(Arc::clone(&app_state)) + .layer(middleware::from_fn_with_state(Arc::clone(&app_state), auth_guard_middleware)) .layer(middleware::from_fn_with_state(Arc::clone(&app_state), origin_guard_middleware)) .layer(ServiceBuilder::new().layer( TraceLayer::new_for_http() @@ -1985,6 +2255,8 @@ fn start_moq_webtransport_acceptor( return Ok(()); }; + let auth_state = Arc::clone(&app_state.auth); + // Parse address for WebTransport (UDP will use the same port as HTTP/HTTPS) let addr: SocketAddr = config.server.address.parse()?; @@ -2032,17 +2304,133 @@ fn start_moq_webtransport_acceptor( // Accept connections in a loop while let Some(request) = server.accept().await { let 
gateway = Arc::clone(&gateway); + let auth_state = Arc::clone(&auth_state); tokio::spawn(async move { match request { moq_native::Request::WebTransport(wt_request) => { - let path = wt_request.url().path().to_string(); + let url = wt_request.url(); + let path = url.path().to_string(); + + // SECURITY: Never log the full URL (may contain jwt) debug!(path = %path, "Received WebTransport connection request"); + // Validate MoQ auth if enabled + let moq_auth = if auth_state.is_enabled() { + // Extract jwt from query params + let jwt = url + .query_pairs() + .find(|(k, _)| k == "jwt") + .map(|(_, v)| v.to_string()); + + let Some(jwt) = jwt else { + warn!(path = %path, "MoQ auth failed: missing jwt parameter"); + let _ = wt_request + .close(axum::http::StatusCode::UNAUTHORIZED) + .await; + return; + }; + + // Validate JWT + let claims = match auth_state.validate_moq_token(&jwt) { + Ok(c) => c, + Err(e) => { + warn!(path = %path, error = %e, "MoQ JWT validation failed"); + let _ = wt_request + .close(axum::http::StatusCode::UNAUTHORIZED) + .await; + return; + }, + }; + + // Check audience + if claims.aud != crate::auth::AUD_MOQ { + warn!(path = %path, expected = crate::auth::AUD_MOQ, actual = %claims.aud, "MoQ auth failed: wrong audience"); + let _ = wt_request + .close(axum::http::StatusCode::UNAUTHORIZED) + .await; + return; + } + + let token_hash = crate::auth::hash_token(&jwt); + + // Enforce "tokens we mint" policy (parity with HTTP API auth). 
+ let metadata_store = auth_state.token_metadata_store().cloned(); + let Some(metadata_store) = metadata_store else { + warn!(path = %path, "MoQ auth failed: token metadata store not available"); + let _ = wt_request + .close(axum::http::StatusCode::SERVICE_UNAVAILABLE) + .await; + return; + }; + + let meta = match metadata_store.get(&claims.jti).await { + Ok(Some(meta)) => meta, + Ok(None) => { + warn!(path = %path, jti = %claims.jti, "MoQ auth failed: token not recognized (not minted by this server)"); + let _ = wt_request + .close(axum::http::StatusCode::UNAUTHORIZED) + .await; + return; + }, + Err(e) => { + warn!(path = %path, error = %e, "MoQ auth failed: metadata store error"); + let _ = wt_request + .close(axum::http::StatusCode::SERVICE_UNAVAILABLE) + .await; + return; + }, + }; + + // Extra robustness: ensure the presented token matches the stored hash. + if meta.token_hash != token_hash { + warn!(path = %path, jti = %claims.jti, "MoQ auth failed: token hash mismatch"); + let _ = wt_request + .close(axum::http::StatusCode::UNAUTHORIZED) + .await; + return; + } + + if meta.revoked { + warn!(path = %path, jti = %claims.jti, "MoQ auth failed: token revoked"); + let _ = wt_request + .close(axum::http::StatusCode::UNAUTHORIZED) + .await; + return; + } + + // Check revocation + if auth_state.is_revoked(&token_hash) { + warn!(path = %path, "MoQ auth failed: token revoked"); + let _ = wt_request + .close(axum::http::StatusCode::UNAUTHORIZED) + .await; + return; + } + + // Verify root matches path and reduce permissions + match crate::auth::verify_moq_token(&claims, &path) { + Ok(ctx) => Some(Arc::new(ctx) + as Arc< + dyn streamkit_core::moq_gateway::MoqAuthChecker, + >), + Err(e) => { + warn!(path = %path, error = %e, "MoQ path verification failed"); + let _ = wt_request + .close(axum::http::StatusCode::UNAUTHORIZED) + .await; + return; + }, + } + } else { + None + }; + match wt_request.ok().await { Ok(session) => { - if let Err(e) = - 
gateway.accept_connection(session, path.clone()).await + if let Err(e) = gateway + .accept_connection(session, path.clone(), moq_auth) + .await { warn!(path = %path, error = %e, "Failed to route WebTransport connection"); } @@ -2086,18 +2474,134 @@ fn start_moq_webtransport_acceptor( /// - The Ctrl+C signal handler cannot be installed (critical OS failure) /// - The SIGTERM signal handler cannot be installed on Unix systems (critical OS failure) /// - The plugin manager fails to initialize (via `create_app`) +#[allow(clippy::cognitive_complexity)] pub async fn start_server(config: &Config) -> Result<(), Box> { - let (app, app_state) = create_app(config.clone()); + let addr: SocketAddr = config.server.address.parse()?; + + // Determine if auth should be enabled based on config mode and bind address + let auth_enabled = match config.auth.mode { + crate::config::AuthMode::Auto => !addr.ip().is_loopback(), + crate::config::AuthMode::Enabled => true, + crate::config::AuthMode::Disabled => false, + }; + + // Deployment footgun: cookie-based auth without TLS. + // + // When TLS is disabled, session cookies are set without the `Secure` attribute, so browsers + // may send them over plain HTTP. This is unsafe on untrusted networks. + if auth_enabled && !config.server.tls { + warn!( + mode = ?config.auth.mode, + address = %addr, + "Auth is enabled but TLS is disabled; session cookies will be set without the Secure attribute. \ + Enable TLS (server.tls=true) or terminate TLS in a trusted reverse proxy and ensure cookies are only used over HTTPS." + ); + } + + // Common migration footgun: deployments that previously relied on a reverse proxy setting a + // trusted role header may have `auth.mode=auto` and bind to a non-loopback address. + // + // In that case, built-in auth will turn on implicitly and override `permissions.role_header` + // (see `create_app`), which can break proxy-based auth unexpectedly. 
+ if matches!(config.auth.mode, crate::config::AuthMode::Auto) + && auth_enabled + && config.permissions.role_header.is_some() + { + warn!( + mode = ?config.auth.mode, + address = %addr, + role_header = %config.permissions.role_header.as_deref().unwrap_or_default(), + "auth.mode=auto enabled built-in auth due to a non-loopback bind address, but permissions.role_header is set. \ + Built-in auth overrides role_header and ignores reverse-proxy role headers. \ + If you rely on proxy auth, set auth.mode=disabled." + ); + } + + // Validate CORS configuration early - fail if wildcard origins with auth enabled + let has_wildcard = config.server.cors.allowed_origins.iter().any(|o| o == "*"); + if auth_enabled && has_wildcard { + return Err( + "CORS allowed_origins='*' is incompatible with auth (cookies require explicit origins). \ + Set allowed_origins to specific origins or disable auth.".into() + ); + } + + // Common deploy footgun: auth enabled + localhost-only CORS allowlist. + // + // When auth is enabled, browser requests rely on cookie auth, which requires that the browser + // `Origin` be on the allowlist for mutating endpoints and the WebSocket control plane. If the + // server is reachable on a non-loopback address but the allowlist is still localhost-only, + // the UI will fail with 403. + if auth_enabled + && !addr.ip().is_loopback() + && cors_allowed_origins_are_loopback_only(&config.server.cors.allowed_origins) + { + warn!( + allowed_origins = ?config.server.cors.allowed_origins, + address = %addr, + "Auth is enabled, but server.cors.allowed_origins appears to be loopback-only; \ + browser requests from non-local origins will be rejected. \ + Configure [server.cors].allowed_origins for your deployment origin(s)." 
+ ); + } + + // Initialize auth state + let auth = if auth_enabled { + info!( + mode = ?config.auth.mode, + state_dir = %config.auth.state_dir, + "Initializing authentication" + ); + match crate::auth::AuthState::new(&config.auth, true).await { + Ok(state) => { + info!("Authentication enabled and initialized"); + // Startup banner (no secrets): how to log in + verify tokens. + let scheme = if config.server.tls { "https" } else { "http" }; + let base_path = + normalize_base_path(config.server.base_path.as_deref()).unwrap_or_default(); + let login_path = format!("{base_path}/login"); + let ui_host = if addr.ip().is_unspecified() { + format!("localhost:{}", addr.port()) + } else { + addr.to_string() + }; + + let token_path = + std::path::PathBuf::from(&config.auth.state_dir).join("admin.token"); + if token_path.exists() { + info!(path = %token_path.display(), "Bootstrap admin token file"); + } else { + warn!(path = %token_path.display(), "Bootstrap admin token file missing"); + } + info!("To print the bootstrap token: skit auth print-admin-token"); + info!("Web UI login: {}://{}{}", scheme, ui_host, login_path); + info!("JWKS (public): {}://{}/.well-known/jwks.json", scheme, ui_host); + Arc::new(state) + }, + Err(e) => { + return Err(format!("Failed to initialize authentication: {e}").into()); + }, + } + } else { + info!( + mode = ?config.auth.mode, + is_loopback = addr.ip().is_loopback(), + "Authentication disabled" + ); + Arc::new(crate::auth::AuthState::disabled()) + }; + + let (app, app_state) = create_app(config.clone(), Some(auth)); #[cfg(not(feature = "moq"))] let _ = &app_state; - let addr: SocketAddr = config.server.address.parse()?; - if !addr.ip().is_loopback() && config.permissions.role_header.is_none() { + // Legacy role_header check - only applies when auth is disabled + if !auth_enabled && !addr.ip().is_loopback() && config.permissions.role_header.is_none() { if !config.permissions.allow_insecure_no_auth { return Err(format!( - "Refusing to start: 
server.address is '{addr}' (non-loopback) but permissions.role_header is not set. \ - StreamKit does not implement authentication; without a trusted auth layer, all requests fall back to SK_ROLE/default_role ('{}'). \ - Fix: put StreamKit behind an authenticating reverse proxy and set permissions.role_header, or (unsafe) set permissions.allow_insecure_no_auth = true to override.", + "Refusing to start: server.address is '{addr}' (non-loopback) but auth is disabled and permissions.role_header is not set. \ + Without built-in auth or a trusted auth layer, all requests fall back to SK_ROLE/default_role ('{}'). \ + Fix: enable auth (mode=enabled or mode=auto), put StreamKit behind an authenticating reverse proxy and set permissions.role_header, or (unsafe) set permissions.allow_insecure_no_auth = true to override.", config.permissions.default_role ) .into()); @@ -2106,7 +2610,7 @@ pub async fn start_server(config: &Config) -> Result<(), Box, pub event_tx: broadcast::Sender, pub plugin_manager: SharedUnifiedPluginManager, + pub auth: Arc, #[cfg(feature = "moq")] pub moq_gateway: Option>, } diff --git a/apps/skit/tests/auth_integration_test.rs b/apps/skit/tests/auth_integration_test.rs new file mode 100644 index 00000000..c609f969 --- /dev/null +++ b/apps/skit/tests/auth_integration_test.rs @@ -0,0 +1,207 @@ +// SPDX-FileCopyrightText: © 2025 StreamKit Contributors +// +// SPDX-License-Identifier: MPL-2.0 + +#![allow( + clippy::unwrap_used, + clippy::expect_used, + clippy::disallowed_macros, + clippy::uninlined_format_args +)] + +use axum::http::StatusCode; +use reqwest::header::{HeaderValue, AUTHORIZATION, COOKIE}; +use serde_json::json; +use std::net::SocketAddr; +use std::sync::Arc; +use streamkit_server::Config; +use tempfile::TempDir; +use tokio::net::TcpListener; +use tokio::time::{sleep, Duration}; +use tokio_tungstenite::tungstenite::client::IntoClientRequest; + +async fn start_test_server_with_auth( +) -> Option<(SocketAddr, tokio::task::JoinHandle<()>, String, 
TempDir)> { + let listener = match TcpListener::bind("127.0.0.1:0").await { + Ok(listener) => listener, + Err(e) if e.kind() == std::io::ErrorKind::PermissionDenied => return None, + Err(e) => panic!("Failed to bind test server listener: {e}"), + }; + let addr = listener.local_addr().unwrap(); + + let temp_dir = TempDir::new().unwrap(); + + let mut config = Config::default(); + config.auth.mode = streamkit_server::config::AuthMode::Enabled; + config.auth.state_dir = temp_dir.path().to_string_lossy().to_string(); + + let auth_state = streamkit_server::auth::AuthState::new(&config.auth, true) + .await + .expect("Failed to init auth state"); + let auth_state = Arc::new(auth_state); + + let admin_token_path = temp_dir.path().join("admin.token"); + let admin_token = + tokio::fs::read_to_string(&admin_token_path).await.expect("Missing admin.token"); + let admin_token = admin_token.trim().to_string(); + + let (app, _state) = streamkit_server::server::create_app(config, Some(auth_state)); + let server_handle = tokio::spawn(async move { + axum::serve(listener, app.into_make_service()).await.unwrap(); + }); + + sleep(Duration::from_millis(50)).await; + Some((addr, server_handle, admin_token, temp_dir)) +} + +#[tokio::test] +async fn http_api_requires_auth_when_enabled() { + let Some((addr, server_handle, admin_token, _temp_dir)) = start_test_server_with_auth().await + else { + eprintln!("Skipping auth integration tests: local TCP bind not permitted"); + return; + }; + + let client = reqwest::Client::new(); + + // Health remains public + let res = + client.get(format!("http://{addr}/healthz")).send().await.expect("Failed to GET /healthz"); + assert_eq!(res.status(), StatusCode::OK); + + // /auth/me is public and reports unauthenticated when no token + let res = client + .get(format!("http://{addr}/api/v1/auth/me")) + .send() + .await + .expect("Failed to GET /api/v1/auth/me"); + assert_eq!(res.status(), StatusCode::OK); + let me: serde_json::Value = 
res.json().await.expect("Invalid JSON from /auth/me"); + assert_eq!(me["auth_enabled"], true); + assert_eq!(me["authenticated"], false); + + // Protected API should require auth + let res = client + .get(format!("http://{addr}/api/v1/permissions")) + .send() + .await + .expect("Failed to GET /api/v1/permissions"); + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + + // Authorization: Bearer should work + let res = client + .get(format!("http://{addr}/api/v1/permissions")) + .header(AUTHORIZATION, format!("Bearer {admin_token}")) + .send() + .await + .expect("Failed to GET /api/v1/permissions with bearer"); + assert_eq!(res.status(), StatusCode::OK); + + // Login should set session cookie + let res = client + .post(format!("http://{addr}/api/v1/auth/login")) + .json(&json!({ "token": admin_token })) + .send() + .await + .expect("Failed to POST /api/v1/auth/login"); + assert_eq!(res.status(), StatusCode::NO_CONTENT); + let set_cookie = res.headers().get("set-cookie").expect("Missing set-cookie").to_str().unwrap(); + let cookie_kv = set_cookie.split(';').next().expect("Invalid set-cookie"); + + // Cookie should authenticate + let res = client + .get(format!("http://{addr}/api/v1/permissions")) + .header(COOKIE, HeaderValue::from_str(cookie_kv).unwrap()) + .send() + .await + .expect("Failed to GET /api/v1/permissions with cookie"); + assert_eq!(res.status(), StatusCode::OK); + + server_handle.abort(); +} + +#[tokio::test] +async fn token_revocation_is_enforced() { + let Some((addr, server_handle, admin_token, _temp_dir)) = start_test_server_with_auth().await + else { + eprintln!("Skipping auth integration tests: local TCP bind not permitted"); + return; + }; + + let client = reqwest::Client::new(); + + // Mint a viewer token + let res = client + .post(format!("http://{addr}/api/v1/auth/tokens")) + .header(AUTHORIZATION, format!("Bearer {admin_token}")) + .json(&json!({ + "role": "viewer", + "label": "test-viewer", + "ttl_secs": 3600 + })) + .send() + .await + 
.expect("Failed to mint token"); + assert_eq!(res.status(), StatusCode::OK); + let body: serde_json::Value = res.json().await.expect("Invalid JSON from mint token"); + let viewer_token = body["token"].as_str().unwrap().to_string(); + let jti = body["jti"].as_str().unwrap().to_string(); + + // Viewer token should authenticate + let res = client + .get(format!("http://{addr}/api/v1/permissions")) + .header(AUTHORIZATION, format!("Bearer {viewer_token}")) + .send() + .await + .expect("Failed to call API with viewer token"); + assert_eq!(res.status(), StatusCode::OK); + let perms: serde_json::Value = res.json().await.expect("Invalid JSON from /permissions"); + assert_eq!(perms["role"], "viewer"); + + // Revoke token + let res = client + .delete(format!("http://{addr}/api/v1/auth/tokens/{jti}")) + .header(AUTHORIZATION, format!("Bearer {admin_token}")) + .send() + .await + .expect("Failed to revoke token"); + assert_eq!(res.status(), StatusCode::OK); + + // Revoked token should be rejected + let res = client + .get(format!("http://{addr}/api/v1/permissions")) + .header(AUTHORIZATION, format!("Bearer {viewer_token}")) + .send() + .await + .expect("Failed to call API with revoked token"); + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + + server_handle.abort(); +} + +#[tokio::test] +async fn websocket_requires_auth_when_enabled() { + let Some((addr, server_handle, admin_token, _temp_dir)) = start_test_server_with_auth().await + else { + eprintln!("Skipping auth integration tests: local TCP bind not permitted"); + return; + }; + + let ws_url = format!("ws://{addr}/api/v1/control"); + + // Unauthenticated websocket should fail + let err = tokio_tungstenite::connect_async(&ws_url).await.unwrap_err(); + let tokio_tungstenite::tungstenite::Error::Http(response) = err else { + panic!("Expected HTTP error, got: {err:?}"); + }; + assert_eq!(response.status(), 401); + + // Authenticated websocket should connect + let mut req = ws_url.into_client_request().unwrap(); + 
req.headers_mut() + .insert(AUTHORIZATION, HeaderValue::from_str(&format!("Bearer {admin_token}")).unwrap()); + + let (_ws, _) = tokio_tungstenite::connect_async(req).await.expect("WS connect failed"); + + server_handle.abort(); +} diff --git a/apps/skit/tests/base_path_routing_test.rs b/apps/skit/tests/base_path_routing_test.rs index 0d609a07..2399ccdf 100644 --- a/apps/skit/tests/base_path_routing_test.rs +++ b/apps/skit/tests/base_path_routing_test.rs @@ -30,7 +30,7 @@ async fn start_test_server_with_base_path( let mut config = Config::default(); config.server.base_path = Some(base_path); - let (app, _state) = streamkit_server::server::create_app(config); + let (app, _state) = streamkit_server::server::create_app(config, None); axum::serve(listener, app.into_make_service()).await.unwrap(); }); diff --git a/apps/skit/tests/end_to_end_test.rs b/apps/skit/tests/end_to_end_test.rs index e17ca52a..46dc91d3 100644 --- a/apps/skit/tests/end_to_end_test.rs +++ b/apps/skit/tests/end_to_end_test.rs @@ -30,7 +30,7 @@ async fn start_test_server() -> Option<(SocketAddr, tokio::task::JoinHandle<()>) // Start server in background using the existing listener let server_handle = tokio::spawn(async move { - let (app, _state) = streamkit_server::server::create_app(Config::default()); + let (app, _state) = streamkit_server::server::create_app(Config::default(), None); axum::serve(listener, app.into_make_service()).await.unwrap(); }); diff --git a/apps/skit/tests/http_origin_test.rs b/apps/skit/tests/http_origin_test.rs index 993acda8..4f1ba76c 100644 --- a/apps/skit/tests/http_origin_test.rs +++ b/apps/skit/tests/http_origin_test.rs @@ -18,7 +18,7 @@ async fn start_test_server() -> Option<(SocketAddr, tokio::task::JoinHandle<()>) let addr = listener.local_addr().unwrap(); let server_handle = tokio::spawn(async move { - let (app, _state) = streamkit_server::server::create_app(Config::default()); + let (app, _state) = streamkit_server::server::create_app(Config::default(), None); 
axum::serve(listener, app.into_make_service()).await.unwrap(); }); diff --git a/apps/skit/tests/http_sessions_test.rs b/apps/skit/tests/http_sessions_test.rs index 089c5544..13682b1a 100644 --- a/apps/skit/tests/http_sessions_test.rs +++ b/apps/skit/tests/http_sessions_test.rs @@ -24,7 +24,7 @@ async fn start_test_server() -> Option<(SocketAddr, tokio::task::JoinHandle<()>) let addr = listener.local_addr().unwrap(); let server_handle = tokio::spawn(async move { - let (app, _state) = streamkit_server::server::create_app(Config::default()); + let (app, _state) = streamkit_server::server::create_app(Config::default(), None); axum::serve(listener, app.into_make_service()).await.unwrap(); }); diff --git a/apps/skit/tests/plugin_integration_test.rs b/apps/skit/tests/plugin_integration_test.rs index b042e2fa..811a1c1f 100644 --- a/apps/skit/tests/plugin_integration_test.rs +++ b/apps/skit/tests/plugin_integration_test.rs @@ -64,7 +64,7 @@ impl TestServer { let (shutdown_tx, shutdown_rx) = oneshot::channel::<()>(); let handle = tokio::spawn(async move { - let (app, _state) = streamkit_server::server::create_app(config); + let (app, _state) = streamkit_server::server::create_app(config, None); axum::serve(listener, app.into_make_service()) .with_graceful_shutdown(async move { let _ = shutdown_rx.await; diff --git a/apps/skit/tests/session_lifecycle_test.rs b/apps/skit/tests/session_lifecycle_test.rs index df1f8748..37cb572d 100644 --- a/apps/skit/tests/session_lifecycle_test.rs +++ b/apps/skit/tests/session_lifecycle_test.rs @@ -71,7 +71,7 @@ async fn start_test_server() -> Option<(SocketAddr, tokio::task::JoinHandle<()>) // Start server in background using the existing listener let server_handle = tokio::spawn(async move { - let (app, _state) = streamkit_server::server::create_app(Config::default()); + let (app, _state) = streamkit_server::server::create_app(Config::default(), None); axum::serve(listener, app.into_make_service()).await.unwrap(); }); diff --git 
a/apps/skit/tests/websocket_origin_test.rs b/apps/skit/tests/websocket_origin_test.rs index 3bee1ff7..b470e77d 100644 --- a/apps/skit/tests/websocket_origin_test.rs +++ b/apps/skit/tests/websocket_origin_test.rs @@ -19,7 +19,7 @@ async fn start_test_server() -> Option<(SocketAddr, tokio::task::JoinHandle<()>) let addr = listener.local_addr().unwrap(); let server_handle = tokio::spawn(async move { - let (app, _state) = streamkit_server::server::create_app(Config::default()); + let (app, _state) = streamkit_server::server::create_app(Config::default(), None); axum::serve(listener, app.into_make_service()).await.unwrap(); }); diff --git a/crates/core/src/moq_gateway.rs b/crates/core/src/moq_gateway.rs index ea23429e..0142c464 100644 --- a/crates/core/src/moq_gateway.rs +++ b/crates/core/src/moq_gateway.rs @@ -9,12 +9,25 @@ //! here in core to avoid circular dependencies. use async_trait::async_trait; +use std::fmt::Debug; use std::sync::Arc; use tokio::sync::mpsc; /// Opaque type for WebTransport session - actual type defined in moq-native pub type WebTransportSession = Box; +/// Trait for MoQ auth permission checking. +/// +/// This trait is implemented in the server crate with the full MoqAuthContext. +/// Nodes can use this trait to check permissions without knowing the implementation details. +pub trait MoqAuthChecker: Send + Sync + Debug { + /// Check if a broadcast name is allowed for subscribe. + fn can_subscribe(&self, broadcast: &str) -> bool; + + /// Check if a broadcast name is allowed for publish. + fn can_publish(&self, broadcast: &str) -> bool; +} + /// Result of attempting to handle a MoQ connection #[derive(Debug)] pub enum MoqConnectionResult { @@ -34,6 +47,10 @@ pub struct MoqConnection { /// Channel to send response back to gateway pub response_tx: tokio::sync::oneshot::Sender, + + /// Optional auth context for permission checking. + /// None when auth is disabled. 
+ pub auth: Option<Arc<dyn MoqAuthChecker>>, } /// Gateway interface that nodes can use to register routes diff --git a/crates/nodes/src/transport/moq/mod.rs b/crates/nodes/src/transport/moq/mod.rs index 70e115cb..d8b52fa3 100644 --- a/crates/nodes/src/transport/moq/mod.rs +++ b/crates/nodes/src/transport/moq/mod.rs @@ -17,6 +17,7 @@ mod pull; mod push; use std::sync::OnceLock; +use url::Url; // Re-export public types pub use peer::{MoqPeerConfig, MoqPeerNode}; @@ -45,6 +46,53 @@ fn shared_insecure_client() -> Result { } } +pub(super) fn redact_url_str_for_logs(raw: &str) -> String { + raw.parse::<Url>().map_or_else( + |_| raw.split(['?', '#']).next().unwrap_or(raw).to_string(), + |url| redact_url_for_logs(&url), + ) +} + +pub(super) fn redact_url_for_logs(url: &Url) -> String { + let mut url = url.clone(); + url.set_query(None); + url.set_fragment(None); + url.to_string() +} + +pub(super) fn parse_moq_url(raw: &str, jwt: Option<&str>) -> Result<Url, StreamKitError> { + let mut url: Url = raw.parse().map_err(|e| { + let redacted = redact_url_str_for_logs(raw); + StreamKitError::Configuration(format!("Failed to parse MoQ URL '{redacted}': {e}")) + })?; + + let Some(jwt) = jwt else { + return Ok(url); + }; + + let jwt = jwt.trim(); + if jwt.is_empty() { + return Err(StreamKitError::Configuration("MoQ jwt param must not be empty".to_string())); + } + + let existing: Vec<(String, String)> = url + .query_pairs() + .map(|(k, v)| (k.into_owned(), v.into_owned())) + .filter(|(k, _)| k != "jwt") + .collect(); + + { + let mut qp = url.query_pairs_mut(); + qp.clear(); + for (k, v) in existing { + qp.append_pair(&k, &v); + } + qp.append_pair("jwt", jwt); + } + + Ok(url) +} + /// Registers the MoQ transport nodes. /// /// # Panics diff --git a/crates/nodes/src/transport/moq/peer.rs b/crates/nodes/src/transport/moq/peer.rs index 6f5d7c4c..180c59fe 100644 --- a/crates/nodes/src/transport/moq/peer.rs +++ b/crates/nodes/src/transport/moq/peer.rs @@ -267,6 +267,27 @@ impl ProcessorNode for MoqPeerNode { tokio::select!
{ // Accept bidirectional peer connections on the base path Some(conn) = base_connection_rx.recv() => { + // Auth check: bidirectional needs both publish and subscribe permissions + if let Some(auth) = &conn.auth { + let input_bc = &self.config.input_broadcast; + let output_bc = &self.config.output_broadcast; + + if !auth.can_publish(input_bc) || !auth.can_subscribe(output_bc) { + tracing::warn!( + path = %conn.path, + input_broadcast = %input_bc, + output_broadcast = %output_bc, + "Rejecting bidirectional connection - missing publish or subscribe permission" + ); + let _ = conn.response_tx.send( + streamkit_core::moq_gateway::MoqConnectionResult::Rejected( + "Bidirectional requires both publish and subscribe permission".to_string() + ) + ); + continue; + } + } + tracing::info!(path = %conn.path, "Peer connecting"); let sub_count = subscriber_count.clone(); @@ -300,6 +321,25 @@ impl ProcessorNode for MoqPeerNode { // Accept publisher connections on /input path Some(conn) = input_connection_rx.recv() => { + // Auth check: publisher needs publish permission + if let Some(auth) = &conn.auth { + let input_bc = &self.config.input_broadcast; + + if !auth.can_publish(input_bc) { + tracing::warn!( + path = %conn.path, + broadcast = %input_bc, + "Rejecting publisher connection - publish permission denied" + ); + let _ = conn.response_tx.send( + streamkit_core::moq_gateway::MoqConnectionResult::Rejected( + format!("Publish permission denied for broadcast '{input_bc}'") + ) + ); + continue; + } + } + let Ok(permit) = publisher_slot.clone().try_acquire_owned() else { tracing::warn!(path = %conn.path, "Rejecting publisher connection - already have a publisher"); let _ = conn.response_tx.send( @@ -332,6 +372,25 @@ impl ProcessorNode for MoqPeerNode { // Accept subscriber connections on /output path Some(conn) = output_connection_rx.recv() => { + // Auth check: subscriber needs subscribe permission + if let Some(auth) = &conn.auth { + let output_bc = 
&self.config.output_broadcast; + + if !auth.can_subscribe(output_bc) { + tracing::warn!( + path = %conn.path, + broadcast = %output_bc, + "Rejecting subscriber connection - subscribe permission denied" + ); + let _ = conn.response_tx.send( + streamkit_core::moq_gateway::MoqConnectionResult::Rejected( + format!("Subscribe permission denied for broadcast '{output_bc}'") + ) + ); + continue; + } + } + + tracing::info!(path = %conn.path, "Subscriber connecting"); + let sub_count = subscriber_count.clone(); diff --git a/crates/nodes/src/transport/moq/pull.rs b/crates/nodes/src/transport/moq/pull.rs index b5078733..b92af619 100644 --- a/crates/nodes/src/transport/moq/pull.rs +++ b/crates/nodes/src/transport/moq/pull.rs @@ -21,6 +21,10 @@ use streamkit_core::{ #[serde(default)] pub struct MoqPullConfig { pub url: String, + /// Optional JWT for authenticated MoQ relays. When set, it is appended as `?jwt=...`. + /// + /// This is compatible with moq-relay and StreamKit's built-in MoQ auth. + pub jwt: Option<String>, pub broadcast: String, /// Batch window in milliseconds. If > 0, after receiving a frame the node will /// wait up to this duration to collect additional frames before forwarding.
@@ -99,7 +103,7 @@ impl ProcessorNode for MoqPullNode { ) -> Result { tracing::info!( node_id = %ctx.node_id, - url = %self.config.url, + url = %super::redact_url_str_for_logs(&self.config.url), broadcast = %self.config.broadcast, "MoqPullNode: Discovering tracks from broadcast catalog" ); @@ -145,7 +149,11 @@ impl ProcessorNode for MoqPullNode { async fn run(self: Box<Self>, mut context: NodeContext) -> Result<(), StreamKitError> { let node_name = context.output_sender.node_name().to_string(); state_helpers::emit_initializing(&context.state_tx, &node_name); - tracing::info!(url = %self.config.url, broadcast = %self.config.broadcast, "MoqPullNode starting"); + tracing::info!( + url = %super::redact_url_str_for_logs(&self.config.url), + broadcast = %self.config.broadcast, + "MoqPullNode starting" + ); state_helpers::emit_running(&context.state_tx, &node_name); let mut total_packet_count = 0; @@ -276,17 +284,12 @@ impl MoqPullNode { /// This is used during initialization to create output pins dynamically.
async fn discover_tracks(&self) -> Result, StreamKitError> { tracing::info!( - url = %self.config.url, + url = %super::redact_url_str_for_logs(&self.config.url), broadcast = %self.config.broadcast, "Connecting to MoQ server to discover tracks" ); - let url = self.config.url.parse().map_err(|e| { - StreamKitError::Configuration(format!( - "Failed to parse MoQ URL '{}': {}", - self.config.url, e - )) - })?; + let url = super::parse_moq_url(&self.config.url, self.config.jwt.as_deref())?; let client = super::shared_insecure_client()?; @@ -424,12 +427,7 @@ impl MoqPullNode { context: &mut NodeContext, total_packet_count: &mut u32, ) -> Result { - let url = self.config.url.parse().map_err(|e| { - StreamKitError::Configuration(format!( - "Failed to parse MoQ URL '{}': {}", - self.config.url, e - )) - })?; + let url = super::parse_moq_url(&self.config.url, self.config.jwt.as_deref())?; let client = super::shared_insecure_client()?; diff --git a/crates/nodes/src/transport/moq/push.rs b/crates/nodes/src/transport/moq/push.rs index 666bbc7f..68ef0bc7 100644 --- a/crates/nodes/src/transport/moq/push.rs +++ b/crates/nodes/src/transport/moq/push.rs @@ -17,6 +17,10 @@ use streamkit_core::{ #[serde(default)] pub struct MoqPushConfig { pub url: String, + /// Optional JWT for authenticated MoQ relays. When set, it is appended as `?jwt=...`. + /// + /// This is compatible with moq-relay and StreamKit's built-in MoQ auth. 
+ pub jwt: Option<String>, pub broadcast: String, #[serde(default = "default_channels")] pub channels: u32, @@ -48,6 +52,7 @@ impl Default for MoqPushConfig { fn default() -> Self { Self { url: String::new(), + jwt: None, broadcast: String::new(), channels: 2, group_duration_ms: default_group_duration_ms(), @@ -85,15 +90,18 @@ impl ProcessorNode for MoqPushNode { let node_name = context.output_sender.node_name().to_string(); state_helpers::emit_initializing(&context.state_tx, &node_name); - let url = match self.config.url.parse() { + let url = match super::parse_moq_url(&self.config.url, self.config.jwt.as_deref()) { Ok(url) => url, Err(e) => { - let err_msg = format!("Failed to parse MoQ URL '{}': {}", self.config.url, e); - state_helpers::emit_failed(&context.state_tx, &node_name, &err_msg); - return Err(StreamKitError::Configuration(err_msg)); + state_helpers::emit_failed(&context.state_tx, &node_name, e.to_string()); + return Err(e); }, }; - tracing::info!(url = %self.config.url, broadcast = %self.config.broadcast, "MoqPushNode starting"); + tracing::info!( + url = %super::redact_url_str_for_logs(&self.config.url), + broadcast = %self.config.broadcast, + "MoqPushNode starting" + ); tracing::info!( group_duration_ms = self.config.group_duration_ms, initial_delay_ms = self.config.initial_delay_ms, diff --git a/deploy/systemd/skit.toml b/deploy/systemd/skit.toml index c881fe9c..4012a38f 100644 --- a/deploy/systemd/skit.toml +++ b/deploy/systemd/skit.toml @@ -8,6 +8,13 @@ [server] address = "127.0.0.1:4545" +[auth] +# Built-in auth is disabled on loopback by default (auth.mode=auto), but will auto-enable if you +# later bind to a non-loopback address. Keep auth state in the systemd StateDirectory so it +# persists across upgrades. +mode = "auto" +state_dir = "/var/lib/streamkit/auth" + [plugins] # Persist dynamically loaded plugins across upgrades.
directory = "/var/lib/streamkit/plugins" @@ -16,4 +23,3 @@ directory = "/var/lib/streamkit/plugins" console_enable = true file_enable = false console_level = "info" - diff --git a/docker-skit-demo.toml b/docker-skit-demo.toml index 4182c224..dec62113 100644 --- a/docker-skit-demo.toml +++ b/docker-skit-demo.toml @@ -20,6 +20,12 @@ max_body_size = 104857600 # Override with SK_SERVER__MOQ_GATEWAY_URL env var for remote deployments moq_gateway_url = "http://127.0.0.1:4545/moq" +[auth] +# Built-in authentication is enabled by default when binding non-loopback addresses (auth.mode=auto). +mode = "auto" +# Keep auth state under /opt/streamkit so it's easy to persist via a single volume mount. +state_dir = "/opt/streamkit/.streamkit/auth" + [plugins] directory = "/opt/streamkit/plugins" @@ -117,8 +123,7 @@ url = "https://uselessfacts.jsph.pl/*" methods = ["GET"] [permissions] +# NOTE: When built-in auth is enabled, unauthenticated requests are rejected and `default_role` +# is not used. This only applies when auth is disabled. default_role = "user" -# Docker containers must bind to 0.0.0.0 for published ports to work. -# This is only safe when the published ports are bound to localhost (recommended), -# e.g. `-p 127.0.0.1:4545:4545/tcp -p 127.0.0.1:4545:4545/udp`, or otherwise firewalled. -allow_insecure_no_auth = true +# allow_insecure_no_auth = true # Unsafe: only enable if you intentionally disable auth on 0.0.0.0 diff --git a/docker-skit-gpu.toml b/docker-skit-gpu.toml index b50d5c12..747270b9 100644 --- a/docker-skit-gpu.toml +++ b/docker-skit-gpu.toml @@ -15,6 +15,12 @@ max_body_size = 104857600 # Override with SK_SERVER__MOQ_GATEWAY_URL env var for remote deployments moq_gateway_url = "http://127.0.0.1:4545/moq" +[auth] +# Built-in authentication is enabled by default when binding non-loopback addresses (auth.mode=auto). +mode = "auto" +# Keep auth state under /opt/streamkit so it's easy to persist via a single volume mount. 
+state_dir = "/opt/streamkit/.streamkit/auth" + [plugins] directory = "/opt/streamkit/plugins" @@ -95,8 +101,7 @@ url = "https://api.openai.com/v1/chat/completions" methods = ["POST"] [permissions] +# NOTE: When built-in auth is enabled, unauthenticated requests are rejected and `default_role` +# is not used. This only applies when auth is disabled. default_role = "user" -# Docker containers must bind to 0.0.0.0 for published ports to work. -# This is only safe when the published ports are bound to localhost (recommended), -# e.g. `-p 127.0.0.1:4545:4545/tcp -p 127.0.0.1:4545:4545/udp`, or otherwise firewalled. -allow_insecure_no_auth = true +# allow_insecure_no_auth = true # Unsafe: only enable if you intentionally disable auth on 0.0.0.0 diff --git a/docker-skit.toml b/docker-skit.toml index 15bc249e..57a211d5 100644 --- a/docker-skit.toml +++ b/docker-skit.toml @@ -17,6 +17,12 @@ max_body_size = 104857600 # Override with SK_SERVER__MOQ_GATEWAY_URL env var for remote deployments moq_gateway_url = "http://127.0.0.1:4545/moq" +[auth] +# Built-in authentication is enabled by default when binding non-loopback addresses (auth.mode=auto). +mode = "auto" +# Keep auth state under /opt/streamkit so it's easy to persist via a single volume mount. +state_dir = "/opt/streamkit/.streamkit/auth" + [plugins] directory = "/opt/streamkit/plugins" @@ -41,8 +47,7 @@ keep_models_loaded = true enabled = false [permissions] +# NOTE: When built-in auth is enabled, unauthenticated requests are rejected and `default_role` +# is not used. This only applies when auth is disabled. default_role = "user" -# Docker containers must bind to 0.0.0.0 for published ports to work. -# This is only safe when the published ports are bound to localhost (recommended), -# e.g. `-p 127.0.0.1:4545:4545/tcp -p 127.0.0.1:4545:4545/udp`, or otherwise firewalled. 
-allow_insecure_no_auth = true +# allow_insecure_no_auth = true # Unsafe: only enable if you intentionally disable auth on 0.0.0.0 diff --git a/docs/astro.config.mjs b/docs/astro.config.mjs index 1436e219..9aa0a822 100644 --- a/docs/astro.config.mjs +++ b/docs/astro.config.mjs @@ -61,6 +61,7 @@ export default defineConfig({ { label: 'Observability', slug: 'guides/observability' }, { label: 'Script Node', slug: 'guides/script-node' }, { label: 'Using the Web UI', slug: 'guides/web-ui' }, + { label: 'Authentication', slug: 'guides/authentication' }, { label: 'Security', slug: 'guides/security' }, { label: 'Development Workflow', slug: 'guides/development' }, { label: 'Writing Plugins', slug: 'guides/writing-plugins' }, diff --git a/docs/src/content/docs/deployment/docker.md b/docs/src/content/docs/deployment/docker.md index 1af764c1..164b631e 100644 --- a/docs/src/content/docs/deployment/docker.md +++ b/docs/src/content/docs/deployment/docker.md @@ -18,6 +18,30 @@ docker run --rm \ skit serve # optional: this is the image default ``` +### Authenticate (built-in auth) + +The official images bind to `0.0.0.0:4545` inside the container so published ports work. That means built-in auth is enabled by default (`auth.mode = "auto"`). + +Print the bootstrap admin token and paste it into the Web UI at `/login`: + +```bash +docker exec skit skit auth print-admin-token +``` + +### Local demo without login (Linux-only) + +If you want a frictionless demo (no login) and you’re running on **Linux**, you can run StreamKit with host networking and bind to loopback inside the container.
In `auth.mode = "auto"`, this keeps built-in auth **disabled**: + +```bash +TAG=v0.1.0 # replace with the latest release tag +docker run --rm -d --name streamkit \ + --network host \ + -e SK_SERVER__ADDRESS=127.0.0.1:4545 \ + ghcr.io/streamer45/streamkit:${TAG} +``` + +This mode is intended for local demos/dev only (it doesn’t work on Docker Desktop for macOS/Windows, and it changes the container isolation model). + ## Demo Image (Batteries Included) StreamKit also publishes a `-demo` image intended for demos/evaluation. It bundles core plugins plus the models needed by the shipped sample pipelines, so it should work out of the box (but is much larger than the slim images). @@ -29,6 +53,14 @@ docker run --rm \ ghcr.io/streamer45/streamkit:${TAG}-demo ``` +> [!NOTE] +> In Docker, StreamKit binds to `0.0.0.0` inside the container so published ports work. With `auth.mode=auto`, built-in auth is enabled by default. +> To log in, print the bootstrap admin token and paste it into the Web UI at `/login`: +> +> ```bash +> docker exec skit skit auth print-admin-token --raw +> ``` + If you want the OpenAI-powered sample pipelines, pass `OPENAI_API_KEY` without putting it directly in the command: ```bash @@ -72,7 +104,7 @@ gdb -p 1 > The official images ship with `/opt/streamkit/skit.toml` (see `docker-skit.toml` (CPU) / `docker-skit-gpu.toml` (GPU) in the repo). It binds to `0.0.0.0:4545` inside the container so published ports work, but you should publish/bind those ports to localhost (recommended) or otherwise firewall them. > [!CAUTION] -> StreamKit does not currently implement authentication. Do not expose it directly to untrusted networks. Put it behind an auth layer and configure a trusted role header. See [Security](/guides/security/). +> StreamKit ships with built-in authentication. Do not disable auth when exposing it to untrusted networks. See [Authentication](/guides/authentication/) and [Security](/guides/security/).
## Docker Compose @@ -89,12 +121,17 @@ services: command: ["skit", "serve"] restart: unless-stopped + # Recommended: persist auth state (keys + token index) across restarts. + # volumes: + # - streamkit-state:/opt/streamkit/.streamkit + # Optional: persist dynamically loaded plugins # Note: use a named volume so plugins persist across restarts. # volumes: # - streamkit-plugins:/opt/streamkit/plugins # volumes: +# streamkit-state: # streamkit-plugins: ``` @@ -144,6 +181,7 @@ docker build -f Dockerfile.gpu -t streamkit:gpu . | `/opt/streamkit/models` | ML models (Whisper, Kokoro) | | `/opt/streamkit/plugins` | Plugin directory (default in official Docker images) | | `/opt/streamkit/.plugins` | Optional plugin directory if you set `SK_PLUGINS__DIRECTORY=/opt/streamkit/.plugins` | +| `/opt/streamkit/.streamkit` | Auth state (keys, token index, bootstrap admin token) | | `/opt/streamkit/skit.toml` | Configuration file (default config shipped in the image) | ## Health Checks diff --git a/docs/src/content/docs/deployment/systemd.md b/docs/src/content/docs/deployment/systemd.md index edc78670..2cd4b2cb 100644 --- a/docs/src/content/docs/deployment/systemd.md +++ b/docs/src/content/docs/deployment/systemd.md @@ -42,6 +42,14 @@ This installs: By default the installed config binds to `127.0.0.1:4545`. If you want to expose StreamKit on the network, update `server.address` (and consider putting it behind a reverse proxy). +If you bind to a non-loopback address (e.g. `0.0.0.0:4545`), StreamKit enables built-in auth by default (`auth.mode = "auto"`). The bootstrap admin token is written to the auth state directory (recommended: `/var/lib/streamkit/auth/admin.token` for systemd installs). + +To print the bootstrap token: + +```bash +sudo -u streamkit /opt/streamkit/skit --config /etc/streamkit/skit.toml auth print-admin-token +``` + If you're using MoQ/WebTransport, that listener is QUIC/UDP on the **same port as** `[server].address`. 
A traditional HTTP reverse proxy (nginx/Caddy) will not handle the MoQ traffic natively; plan a QUIC/WebTransport-aware gateway or an L4 load balancer for UDP/QUIC, alongside your normal HTTP reverse proxy for the UI/API. ## Manage the service diff --git a/docs/src/content/docs/getting-started/installation.md b/docs/src/content/docs/getting-started/installation.md index c2420ec8..c4ef4e7c 100644 --- a/docs/src/content/docs/getting-started/installation.md +++ b/docs/src/content/docs/getting-started/installation.md @@ -71,7 +71,7 @@ just install-plugins StreamKit uses a TOML configuration file. By default `skit` reads `skit.toml` (or uses defaults if missing). > [!CAUTION] -> StreamKit does not currently implement authentication. If you expose the server beyond localhost, put it behind an authenticating reverse proxy (nginx/Caddy/etc) and configure a trusted role header. +> StreamKit ships with built-in authentication. If you expose the server beyond localhost, keep auth enabled (default in `auth.mode = "auto"`) and follow the [Authentication](/guides/authentication/) and [Security](/guides/security/) guides. ```toml [server] @@ -85,7 +85,7 @@ keep_models_loaded = true max_memory_mb = 8192 ``` -If you bind to a non-loopback address (e.g. `0.0.0.0:4545`), you must either configure a trusted role header (`[permissions].role_header`) behind an auth layer, or explicitly opt out with `[permissions].allow_insecure_no_auth = true` (unsafe). +If you bind to a non-loopback address (e.g. `0.0.0.0:4545`), StreamKit enables built-in auth by default (`[auth].mode = "auto"`). If you disable built-in auth, you must configure a trusted role header (`[permissions].role_header`) behind an auth layer, or explicitly opt out with `[permissions].allow_insecure_no_auth = true` (unsafe). 
Environment variables override config file settings: diff --git a/docs/src/content/docs/getting-started/quick-start.md b/docs/src/content/docs/getting-started/quick-start.md index 895497bf..d2344089 100644 --- a/docs/src/content/docs/getting-started/quick-start.md +++ b/docs/src/content/docs/getting-started/quick-start.md @@ -73,10 +73,26 @@ just skit serve ## Verify -Open [http://localhost:4545](http://localhost:4545) in your browser. You should see the StreamKit dashboard. +Open [http://localhost:4545](http://localhost:4545) in your browser. + +If you see the login screen, StreamKit’s built-in auth is enabled (Docker binds to `0.0.0.0` inside the container). Print the bootstrap admin token and paste it into the UI: + +```bash +docker exec streamkit skit auth print-admin-token +``` + +If you’re on **Linux** and want a frictionless demo (no login), you can run with host networking and bind to loopback inside the container. In `auth.mode = "auto"`, this keeps built-in auth **disabled**: + +```bash +TAG=v0.1.0 # replace with the latest release tag +docker run --rm -d --name streamkit \ + --network host \ + -e SK_SERVER__ADDRESS=127.0.0.1:4545 \ + ghcr.io/streamer45/streamkit:${TAG} +``` > [!CAUTION] -> StreamKit does not currently implement authentication. If you expose the server beyond localhost, put it behind an authenticating reverse proxy and configure roles via a trusted header. +> StreamKit ships with built-in authentication. If you expose the server beyond localhost, keep auth enabled (default in `auth.mode = "auto"`) and follow the [Authentication](/guides/authentication/) and [Security](/guides/security/) guides. 
## Run Your First Pipeline diff --git a/docs/src/content/docs/guides/authentication.md b/docs/src/content/docs/guides/authentication.md new file mode 100644 index 00000000..ea65e653 --- /dev/null +++ b/docs/src/content/docs/guides/authentication.md @@ -0,0 +1,142 @@ +--- +# SPDX-FileCopyrightText: © 2025 StreamKit Contributors +# SPDX-License-Identifier: MPL-2.0 +title: Authentication +description: Built-in JWT auth for the API, Web UI, and MoQ/WebTransport +--- + +StreamKit ships with built-in JWT authentication for: + +- **HTTP API** (`/api/*`) +- **WebSocket control plane** +- **MoQ/WebTransport** (via a dedicated MoQ token) + +## Modes + +Configure built-in auth under `[auth]`: + +- `auto` (default): **disabled** on loopback binds (e.g. `127.0.0.1`), **enabled** on non-loopback binds (e.g. `0.0.0.0`) +- `enabled`: always require auth +- `disabled`: never require auth (not recommended outside localhost) + +```toml +[auth] +mode = "auto" # auto | enabled | disabled +``` + +## Bootstrap admin token + +When auth is enabled for the first time, StreamKit generates a **bootstrap admin token** and writes it to: + +- `${auth.state_dir}/admin.token` (default: `.streamkit/auth/admin.token`) +- `${auth.state_dir}/auth.jwk` (Ed25519 private key as a JWK, `0600`) +- `${auth.state_dir}/jwks.json` (public JWKS for verifying and key rotation) + +StreamKit enforces a “tokens we mint” policy for both API and MoQ tokens: a token is only accepted if its `jti` +exists in `${auth.state_dir}/tokens.json`. If you migrate or restore an instance, persist the entire +`${auth.state_dir}` directory (not just the signing key). 
+ +To print it: + +```bash +skit auth print-admin-token +``` + +Rotate the signing key (and mint a new bootstrap token): + +```bash +skit auth rotate-key +``` + +## CLI token minting + +If you prefer not to use the Web UI, you can mint tokens directly via the CLI: + +```bash +# API token (aud: skit-api) +skit auth mint api --role admin --label "ci" --ttl-secs 3600 --json + +# MoQ token (aud: skit-moq) +# - empty string in --subscribe/--publish means "allow all" +skit auth mint moq --root /session/ --subscribe input --publish output --ttl-secs 3600 --json +``` + +`skit auth mint ...` uses the running server’s HTTP API, and authenticates using `--token` / `--token-file` +(or `${auth.state_dir}/admin.token` if readable on the host). + +### JWKS endpoint (public) + +When auth is enabled, StreamKit serves the public JWKS at: + +- `/.well-known/jwks.json` + +Verifier-only services (future control/media nodes, gateways, etc.) can use this to validate StreamKit-issued JWTs without having access to the private signing key. + +### Docker note + +In the official Docker images, `skit` runs from `/opt/streamkit`, so the default token path is: + +- `/opt/streamkit/.streamkit/auth/admin.token` + +For persistence across restarts, mount a volume for `/opt/streamkit/.streamkit` (or set `[auth].state_dir` to a mounted path). + +## Web UI login (browser) + +When auth is enabled: + +1. Open the Web UI. +2. You’ll be redirected to `/login`. +3. Paste an API token (e.g. the bootstrap admin token). + +StreamKit stores the session as an **HttpOnly cookie** (default name: `skit_session`), so the browser does not need to keep tokens in localStorage. 
+ +## Token management UI (admin) + +When signed in as `admin`, open **Admin → Access Tokens** (`/admin/tokens`) to: + +- Mint additional API tokens (`admin` / `user` / `viewer`) with an optional label + TTL +- Mint MoQ/WebTransport tokens scoped by `root` and publish/subscribe permissions +- List and revoke previously minted tokens + +When built-in auth is disabled (loopback default), this UI is shown read-only (token minting is not needed). + +## API usage (non-browser clients) + +Send the token as a bearer header: + +```bash +curl -H "Authorization: Bearer $SKIT_TOKEN" http://127.0.0.1:4545/api/v1/auth/me +``` + +Admins can mint additional (time-bound) tokens via: + +- `POST /api/v1/auth/tokens` (API tokens) +- `POST /api/v1/auth/moq-tokens` (MoQ tokens) +- `DELETE /api/v1/auth/tokens/{jti}` (revoke) + +## MoQ/WebTransport tokens + +MoQ/WebTransport auth uses a separate JWT audience and is passed as a `?jwt=` query parameter on the gateway URL. Create one via `POST /api/v1/auth/moq-tokens` and connect with: + +`https://<host>:<port>/moq?...&jwt=<token>` + +The token encodes a root path and publish/subscribe permissions (`put`/`get` in the JWT claims), and StreamKit enforces them before accepting the connection. + +### URL helper from `/api/v1/auth/moq-tokens` + +The MoQ mint endpoint returns a `url_template` helper: + +- If `[server].moq_gateway_url` is configured, `url_template` is a full absolute gateway URL with `?jwt=<token>` appended. +- Otherwise, `url_template` is a **relative path** like `/<path>?jwt=<token>`; append it to your gateway base URL. + +## CORS + cookies + +If you use cookie auth from a browser, CORS must allow credentials. With auth enabled, `server.cors.allowed_origins = ["*"]` is rejected; configure explicit origins instead. + +When auth is disabled, `allowed_origins = ["*"]` is allowed (and the server reflects the request `Origin` so credentialed browser requests work), but it is not recommended outside local development.
+ +## Reverse proxy deployments + +You can still run StreamKit behind a reverse proxy for TLS, firewalling, rate limiting, etc. + +If you prefer **external authentication** instead of StreamKit’s built-in auth, set `auth.mode = "disabled"` and configure a trusted role header (`[permissions].role_header`) that your proxy sets after authenticating the caller. See the [Security guide](/guides/security/). diff --git a/docs/src/content/docs/guides/load-testing.md b/docs/src/content/docs/guides/load-testing.md index 9e09932d..02174fa2 100644 --- a/docs/src/content/docs/guides/load-testing.md +++ b/docs/src/content/docs/guides/load-testing.md @@ -17,7 +17,7 @@ Use these when you want to: - Start the server: `just skit serve` - Load tests use the client binary: `just skit-cli -- lt ` -- Some presets require a local MoQ relay at `http://localhost:4443` +- Some presets require a local MoQ relay at `http://localhost:4443` (if your relay requires auth, set `jwt` in the MoQ node params or include `?jwt=` in the `url`) ## Running Presets diff --git a/docs/src/content/docs/guides/security.md b/docs/src/content/docs/guides/security.md index d99d443f..e1ae1241 100644 --- a/docs/src/content/docs/guides/security.md +++ b/docs/src/content/docs/guides/security.md @@ -20,9 +20,9 @@ Set it to `true` only in trusted environments (e.g., local development, or behin ## Role-Based Permissions -StreamKit uses role-based access control (RBAC) to restrict what users can do. The built-in defaults assign unauthenticated requests the `admin` role (full access), but the official Docker images ship with `docker-skit.toml` which sets `default_role = "user"`. +StreamKit uses role-based access control (RBAC) to restrict what users can do. When built-in auth is disabled, roles are selected from a trusted header (optional), `SK_ROLE`, or `[permissions].default_role`. -StreamKit does not implement authentication. 
If you expose the server to untrusted clients, put it behind an authenticating reverse proxy (nginx/Caddy/etc) and configure a trusted role header. +StreamKit also ships with built-in JWT authentication (recommended for production). See [Authentication](/guides/authentication/) for how auth modes, bootstrap tokens, cookies, and token minting work. ### Configuring Roles diff --git a/docs/src/content/docs/guides/web-ui.md b/docs/src/content/docs/guides/web-ui.md index 49eb4b8a..7fde527d 100644 --- a/docs/src/content/docs/guides/web-ui.md +++ b/docs/src/content/docs/guides/web-ui.md @@ -17,6 +17,14 @@ just skit serve just dev # For development with hot reload ``` +If auth is enabled, you’ll be redirected to `/login`. Print the bootstrap admin token and paste it into the UI: + +```bash +skit auth print-admin-token +``` + +See the [Authentication guide](/guides/authentication/) for details. + ## Main Routes The Web UI has four main routes: diff --git a/docs/src/content/docs/guides/writing-plugins.md b/docs/src/content/docs/guides/writing-plugins.md index a54fb695..27e02aed 100644 --- a/docs/src/content/docs/guides/writing-plugins.md +++ b/docs/src/content/docs/guides/writing-plugins.md @@ -25,7 +25,7 @@ Both plugin types are uploaded via `POST /api/v1/plugins` (multipart field name Runtime plugin upload is powerful and dangerous: - Native plugins are arbitrary code execution in the server process. -- StreamKit does not implement authentication; use an authenticating reverse proxy and a trusted role header for access control. +- Treat runtime plugin upload/delete as an admin-only feature: use built-in authentication (recommended) or a trusted reverse proxy + role header for access control. - HTTP plugin upload/delete is globally disabled by default. To enable it, set `[plugins].allow_http_management = true` and ensure only trusted callers have the `load_plugins` / `delete_plugins` permissions. 
See the [Security guide](/guides/security/) for recommended deployment patterns. diff --git a/docs/src/content/docs/index.mdx b/docs/src/content/docs/index.mdx index 52500918..d7be5fb6 100644 --- a/docs/src/content/docs/index.mdx +++ b/docs/src/content/docs/index.mdx @@ -97,7 +97,7 @@ docker run --rm \ Then open [http://localhost:4545](http://localhost:4545) to access the web UI. > [!CAUTION] -> StreamKit does not currently implement authentication. Do not expose it directly to the public internet. Bind to localhost (recommended) or put it behind an authenticating reverse proxy and a trusted role header. See [Security](/guides/security/). +> StreamKit ships with built-in authentication (auto-enabled on non-loopback binds). Do not disable auth when exposing it beyond localhost; follow the [Authentication](/guides/authentication/) and [Security](/guides/security/) guides. > [!NOTE] > Some sample pipelines depend on models like Whisper (STT), Kokoro (TTS), and NLLB (translation). The official Docker images keep those out of the base image; mount them when needed. Some models may have restrictive licenses (e.g. NLLB is CC-BY-NC); review model licenses before production use. See [Docker Deployment](/deployment/docker/) and [Plugins Reference](/reference/plugins/) for details. diff --git a/docs/src/content/docs/reference/configuration-generated.md b/docs/src/content/docs/reference/configuration-generated.md index df63ab05..c00f086f 100644 --- a/docs/src/content/docs/reference/configuration-generated.md +++ b/docs/src/content/docs/reference/configuration-generated.md @@ -9,6 +9,20 @@ description: Auto-generated configuration reference from schema and defaults This page is auto-generated from the server's configuration schema and `Config::default()`. For a human-friendly guide and examples, see [Configuration](./configuration/). +## `[auth]` + +Authentication configuration for built-in JWT-based auth. 
+ +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `api_default_ttl_secs` | integer (uint64) | `86400` | Default TTL for API tokens in seconds. Default: 86400 (24 hours) | +| `api_max_ttl_secs` | integer (uint64) | `2592000` | Maximum TTL for API tokens in seconds. Default: 2592000 (30 days) | +| `cookie_name` | string | `skit_session` | Cookie name for browser sessions. Default: "skit_session" | +| `mode` | string | `auto` | Authentication mode for the server. | +| `moq_default_ttl_secs` | integer (uint64) | `3600` | Default TTL for MoQ tokens in seconds. Default: 3600 (1 hour) | +| `moq_max_ttl_secs` | integer (uint64) | `86400` | Maximum TTL for MoQ tokens in seconds. Default: 86400 (1 day) | +| `state_dir` | string | `.streamkit/auth` | Directory for auth state (keys, tokens). Default: ".streamkit/auth" | + ## `[engine]` Engine configuration for packet processing and buffering. @@ -41,8 +55,8 @@ Permission configuration section for skit.toml. | Option | Type | Default | Description | |--------|------|---------|-------------| -| `allow_insecure_no_auth` | boolean | `false` | Allow starting the server on a non-loopback address without a trusted role header. StreamKit does not implement authentication; without `role_header`, all requests fall back to `SK_ROLE`/`default_role`. Binding to a non-loopback address without a trusted auth layer is unsafe and the server will refuse to start unless this flag is set. | -| `default_role` | string | `admin` | Default role for unauthenticated requests Note: StreamKit does not implement authentication by itself; this value becomes the effective role for any request that is not assigned a role by an external auth layer. For production deployments, set this to a least-privileged role and put an auth layer (or reverse proxy) in front of the server. 
| +| `allow_insecure_no_auth` | boolean | `false` | Allow starting the server on a non-loopback address without built-in auth or a trusted role header. This only applies when built-in auth is disabled. This is unsafe: all requests fall back to `SK_ROLE`/`default_role`. The server refuses to start in this configuration unless this flag is set. | +| `default_role` | string | `admin` | Default role for requests without an authenticated role When built-in auth is disabled, this becomes the effective role for requests that are not assigned a role via a trusted role header or `SK_ROLE`. For production deployments, prefer enabling built-in auth (`[auth].mode`) or running behind an authenticating reverse proxy that sets `[permissions].role_header`. | | `max_concurrent_oneshots` | integer | null (uint) | `null` | Maximum concurrent oneshot pipelines (global limit) None = unlimited | | `max_concurrent_sessions` | integer | null (uint) | `null` | Maximum concurrent dynamic sessions (global limit, applies to all users) None = unlimited | | `role_header` | null | string | `null` | Optional trusted HTTP header used to select a role (e.g. "x-role" or "x-streamkit-role"). If unset, StreamKit ignores role headers entirely and uses `SK_ROLE`/`default_role`. Security note: Only enable this when running behind a trusted reverse proxy or auth layer that (a) authenticates the caller and (b) strips any incoming header with the same name before setting it. | @@ -186,6 +200,75 @@ Telemetry and observability configuration (OpenTelemetry, tokio-console). ], "type": "object" }, + "AuthConfig": { + "description": "Authentication configuration for built-in JWT-based auth.", + "properties": { + "api_default_ttl_secs": { + "default": 86400, + "description": "Default TTL for API tokens in seconds. Default: 86400 (24 hours)", + "format": "uint64", + "minimum": 0, + "type": "integer" + }, + "api_max_ttl_secs": { + "default": 2592000, + "description": "Maximum TTL for API tokens in seconds. 
Default: 2592000 (30 days)", + "format": "uint64", + "minimum": 0, + "type": "integer" + }, + "cookie_name": { + "default": "skit_session", + "description": "Cookie name for browser sessions. Default: \"skit_session\"", + "type": "string" + }, + "mode": { + "$ref": "#/$defs/AuthMode", + "default": "auto", + "description": "Authentication mode (auto, enabled, disabled)" + }, + "moq_default_ttl_secs": { + "default": 3600, + "description": "Default TTL for MoQ tokens in seconds. Default: 3600 (1 hour)", + "format": "uint64", + "minimum": 0, + "type": "integer" + }, + "moq_max_ttl_secs": { + "default": 86400, + "description": "Maximum TTL for MoQ tokens in seconds. Default: 86400 (1 day)", + "format": "uint64", + "minimum": 0, + "type": "integer" + }, + "state_dir": { + "default": ".streamkit/auth", + "description": "Directory for auth state (keys, tokens). Default: \".streamkit/auth\"", + "type": "string" + } + }, + "type": "object" + }, + "AuthMode": { + "description": "Authentication mode for the server.", + "oneOf": [ + { + "const": "auto", + "description": "Auto: disabled on loopback, enabled on non-loopback", + "type": "string" + }, + { + "const": "enabled", + "description": "Always require authentication", + "type": "string" + }, + { + "const": "disabled", + "description": "Disable authentication entirely (NOT recommended for production)", + "type": "string" + } + ] + }, "CorsConfig": { "description": "CORS configuration for cross-origin requests.", "properties": { @@ -496,12 +579,12 @@ Telemetry and observability configuration (OpenTelemetry, tokio-console). "properties": { "allow_insecure_no_auth": { "default": false, - "description": "Allow starting the server on a non-loopback address without a trusted role header.\n\nStreamKit does not implement authentication; without `role_header`, all requests fall back to\n`SK_ROLE`/`default_role`. 
Binding to a non-loopback address without a trusted auth layer is\nunsafe and the server will refuse to start unless this flag is set.", + "description": "Allow starting the server on a non-loopback address without built-in auth or a trusted role\nheader.\n\nThis only applies when built-in auth is disabled.\n\nThis is unsafe: all requests fall back to `SK_ROLE`/`default_role`. The server refuses to\nstart in this configuration unless this flag is set.", "type": "boolean" }, "default_role": { "default": "admin", - "description": "Default role for unauthenticated requests\n\nNote: StreamKit does not implement authentication by itself; this value becomes the\neffective role for any request that is not assigned a role by an external auth layer.\nFor production deployments, set this to a least-privileged role and put an auth layer\n(or reverse proxy) in front of the server.", + "description": "Default role for requests without an authenticated role\n\nWhen built-in auth is disabled, this becomes the effective role for requests that are not\nassigned a role via a trusted role header or `SK_ROLE`.\n\nFor production deployments, prefer enabling built-in auth (`[auth].mode`) or running behind\nan authenticating reverse proxy that sets `[permissions].role_header`.", "type": "string" }, "max_concurrent_oneshots": { @@ -584,7 +667,8 @@ Telemetry and observability configuration (OpenTelemetry, tokio-console). "core::script", "core::telemetry_tap", "core::telemetry_out", - "core::sink" + "core::sink", + "plugin::*" ], "allowed_plugins": [ "plugin::*" @@ -611,6 +695,40 @@ Telemetry and observability configuration (OpenTelemetry, tokio-console). 
"tune_nodes": true, "upload_assets": true, "write_samples": true + }, + "viewer": { + "access_all_sessions": false, + "allowed_assets": [ + "samples/audio/system/*" + ], + "allowed_nodes": [ + "*" + ], + "allowed_plugins": [ + "*" + ], + "allowed_samples": [ + "oneshot/*.yml", + "oneshot/*.yaml", + "dynamic/*.yml", + "dynamic/*.yaml", + "user/*.yml", + "user/*.yaml" + ], + "create_sessions": false, + "delete_assets": false, + "delete_plugins": false, + "delete_samples": false, + "destroy_sessions": false, + "list_nodes": true, + "list_samples": true, + "list_sessions": true, + "load_plugins": false, + "modify_sessions": false, + "read_samples": true, + "tune_nodes": false, + "upload_assets": false, + "write_samples": false } }, "description": "Map of role name -> permissions", @@ -921,6 +1039,18 @@ Telemetry and observability configuration (OpenTelemetry, tokio-console). "$schema": "https://json-schema.org/draft/2020-12/schema", "description": "Root configuration for the StreamKit server.", "properties": { + "auth": { + "$ref": "#/$defs/AuthConfig", + "default": { + "api_default_ttl_secs": 86400, + "api_max_ttl_secs": 2592000, + "cookie_name": "skit_session", + "mode": "auto", + "moq_default_ttl_secs": 3600, + "moq_max_ttl_secs": 86400, + "state_dir": ".streamkit/auth" + } + }, "engine": { "$ref": "#/$defs/EngineConfig", "default": { @@ -1008,7 +1138,8 @@ Telemetry and observability configuration (OpenTelemetry, tokio-console). "core::script", "core::telemetry_tap", "core::telemetry_out", - "core::sink" + "core::sink", + "plugin::*" ], "allowed_plugins": [ "plugin::*" @@ -1035,6 +1166,40 @@ Telemetry and observability configuration (OpenTelemetry, tokio-console). 
"tune_nodes": true, "upload_assets": true, "write_samples": true + }, + "viewer": { + "access_all_sessions": false, + "allowed_assets": [ + "samples/audio/system/*" + ], + "allowed_nodes": [ + "*" + ], + "allowed_plugins": [ + "*" + ], + "allowed_samples": [ + "oneshot/*.yml", + "oneshot/*.yaml", + "dynamic/*.yml", + "dynamic/*.yaml", + "user/*.yml", + "user/*.yaml" + ], + "create_sessions": false, + "delete_assets": false, + "delete_plugins": false, + "delete_samples": false, + "destroy_sessions": false, + "list_nodes": true, + "list_samples": true, + "list_sessions": true, + "load_plugins": false, + "modify_sessions": false, + "read_samples": true, + "tune_nodes": false, + "upload_assets": false, + "write_samples": false } } } diff --git a/docs/src/content/docs/reference/configuration.md b/docs/src/content/docs/reference/configuration.md index 364094a9..5ea26320 100644 --- a/docs/src/content/docs/reference/configuration.md +++ b/docs/src/content/docs/reference/configuration.md @@ -86,15 +86,31 @@ Each plugin in `plugins[]`: - `params` (object?): params for the warmup instance - `fallback_params` (object?): fallback if primary params fail (e.g., GPU → CPU) +## `[auth]` + +Built-in JWT authentication for the HTTP API, WebSocket control plane, and MoQ/WebTransport. 
+ +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `mode` | string | `auto` | `auto` disables auth on loopback and enables it on non-loopback; `enabled` always requires auth; `disabled` turns auth off | +| `state_dir` | string | `.streamkit/auth` | Directory for auth state (keyring, token metadata, revocations, bootstrap admin token) | +| `cookie_name` | string | `skit_session` | HttpOnly cookie name for browser sessions | +| `api_default_ttl_secs` | int | `86400` | Default TTL for API tokens (seconds) | +| `api_max_ttl_secs` | int | `2592000` | Maximum TTL for API tokens (seconds) | +| `moq_default_ttl_secs` | int | `3600` | Default TTL for MoQ tokens (seconds) | +| `moq_max_ttl_secs` | int | `86400` | Maximum TTL for MoQ tokens (seconds) | + +See [Authentication](/guides/authentication/) for bootstrap and login flows. + ## `[permissions]` -Role-based access control. StreamKit does not implement authentication—use a reverse proxy or auth layer. +Role-based access control (RBAC). When built-in auth is disabled, these rules apply to every request based on the resolved role. | Option | Type | Default | Description | |--------|------|---------|-------------| -| `default_role` | string | `admin` | Role for unauthenticated requests | +| `default_role` | string | `admin` | Role used when built-in auth is disabled | | `role_header` | string? | `null` | Trusted header for role selection (only behind a proxy) | -| `allow_insecure_no_auth` | bool | `false` | Allow binding to a non-loopback address without a trusted role header (unsafe) | +| `allow_insecure_no_auth` | bool | `false` | Allow binding to a non-loopback address with auth disabled and no trusted role header (unsafe) | | `max_concurrent_sessions` | int? | `null` | Global limit for dynamic sessions | | `max_concurrent_oneshots` | int? 
| `null` | Global limit for oneshot requests | | `roles` | map | see below | Role name → permissions | diff --git a/docs/src/content/docs/reference/http-api.md b/docs/src/content/docs/reference/http-api.md index 776b663a..b8eb9896 100644 --- a/docs/src/content/docs/reference/http-api.md +++ b/docs/src/content/docs/reference/http-api.md @@ -7,6 +7,20 @@ description: REST endpoints for sessions, schemas, plugins, and oneshot processi Base URL (default): `http://127.0.0.1:4545` +## Authentication + +When built-in auth is enabled, all `/api/v1/*` endpoints require authentication (except `/healthz` and `/health`). + +- Non-browser clients: `Authorization: Bearer ` +- Browsers: log in via `/login` (StreamKit stores the JWT in an HttpOnly cookie) + +Example: + +```bash +TOKEN="$(skit auth print-admin-token --raw)" +curl -H "Authorization: Bearer $TOKEN" http://127.0.0.1:4545/api/v1/config +``` + ## Health - `GET /healthz` diff --git a/docs/src/content/docs/reference/nodes/transport-moq-publisher.md b/docs/src/content/docs/reference/nodes/transport-moq-publisher.md index ec287cae..4b0d68d4 100644 --- a/docs/src/content/docs/reference/nodes/transport-moq-publisher.md +++ b/docs/src/content/docs/reference/nodes/transport-moq-publisher.md @@ -28,6 +28,7 @@ No outputs. | `channels` | `integer (uint32)` | no | `2` | min: `0` | | `group_duration_ms` | `integer (uint64)` | no | `40` | Duration of each MoQ group in milliseconds.
Smaller groups = lower latency but more overhead.
Larger groups = higher latency but better efficiency.
Default: 40ms (2 Opus frames at 20ms each).
For real-time applications, use 20-60ms. For high-latency networks, use 100ms+.
min: `0` | | `initial_delay_ms` | `integer (uint64)` | no | `0` | Adds a timestamp offset (playout delay) so receivers can buffer before playback.

This is especially helpful when subscribers are on higher-latency / higher-jitter links,
and the client begins playback as soon as it sees the first frame.

Default: 0 (no added delay).
min: `0` | +| `jwt` | `null | string` | no | `null` | Optional JWT for authenticated MoQ relays. When set, it is appended as `?jwt=...`.

This is compatible with moq-relay and StreamKit's built-in MoQ auth. | | `url` | `string` | no | — | — | @@ -62,6 +63,14 @@ No outputs. "minimum": 0, "type": "integer" }, + "jwt": { + "default": null, + "description": "Optional JWT for authenticated MoQ relays. When set, it is appended as `?jwt=...`.\n\nThis is compatible with moq-relay and StreamKit's built-in MoQ auth.", + "type": [ + "string", + "null" + ] + }, "url": { "default": "", "type": "string" diff --git a/docs/src/content/docs/reference/nodes/transport-moq-subscriber.md b/docs/src/content/docs/reference/nodes/transport-moq-subscriber.md index e048edce..1526b468 100644 --- a/docs/src/content/docs/reference/nodes/transport-moq-subscriber.md +++ b/docs/src/content/docs/reference/nodes/transport-moq-subscriber.md @@ -26,6 +26,7 @@ No inputs. | --- | --- | --- | --- | --- | | `batch_ms` | `integer (uint64)` | no | `0` | Batch window in milliseconds. If > 0, after receiving a frame the node will
wait up to this duration to collect additional frames before forwarding.
Default: 0 (no batching) - recommended because moq_lite's TrackConsumer::read()
has internal allocation overhead that makes batching counterproductive.
min: `0` | | `broadcast` | `string` | no | — | — | +| `jwt` | `null | string` | no | `null` | Optional JWT for authenticated MoQ relays. When set, it is appended as `?jwt=...`.

This is compatible with moq-relay and StreamKit's built-in MoQ auth. | | `url` | `string` | no | — | — | @@ -47,6 +48,14 @@ No inputs. "default": "", "type": "string" }, + "jwt": { + "default": null, + "description": "Optional JWT for authenticated MoQ relays. When set, it is appended as `?jwt=...`.\n\nThis is compatible with moq-relay and StreamKit's built-in MoQ auth.", + "type": [ + "string", + "null" + ] + }, "url": { "default": "", "type": "string" diff --git a/docs/src/content/docs/reference/plugins/index.md b/docs/src/content/docs/reference/plugins/index.md index e5160125..630e0523 100644 --- a/docs/src/content/docs/reference/plugins/index.md +++ b/docs/src/content/docs/reference/plugins/index.md @@ -14,9 +14,6 @@ curl http://localhost:4545/api/v1/plugins curl http://localhost:4545/api/v1/schema/nodes | jq '.[] | select(.kind | startswith("plugin::"))' ``` -> [!NOTE] -> The second command requires `jq`. - ## Official plugins (8) - [`plugin::native::helsinki`](./plugin-native-helsinki/) (original kind: `helsinki`) diff --git a/e2e/README.md b/e2e/README.md index 5211d2b3..5b293e6a 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -40,6 +40,16 @@ E2E_BASE_URL=http://localhost:4545 bun run test:only just e2e-external http://localhost:4545 ``` +### External server with built-in auth enabled + +If the external server has built-in auth enabled, the auth E2E tests need an admin token. + +Options: + +- Set `E2E_ADMIN_TOKEN` explicitly, or +- Set `E2E_AUTH_STATE_DIR` to the server's auth state directory (the tests will read `admin.token` from it), or +- Use the default local state dir `.streamkit/auth` (the tests will try `../.streamkit/auth/admin.token` when running from `e2e/`). + ## Running Against Vite Dev Server To test against the Vite development server (useful for debugging UI changes): @@ -66,6 +76,7 @@ Both servers must be running for tests to pass. 
- `tests/design.spec.ts` - Design view tests (canvas, samples, YAML editor) - `tests/monitor.spec.ts` - Monitor view tests (session lifecycle) +- `tests/auth.spec.ts` - Built-in auth flow tests (login + cookie + logout) ## Server Harness @@ -83,6 +94,26 @@ Environment variables set by harness: - `SK_LOG__FILE_ENABLE=false` - Disable file logging - `RUST_LOG=warn` - Reduce log noise +### Auth-enabled runs + +To run the E2E suite with StreamKit's built-in auth enabled: + +```bash +just e2e-auth + +# Or directly: +cd e2e +E2E_AUTH=1 bun run test +``` + +When `E2E_AUTH=1` (or `E2E_AUTH_MODE=enabled`) is set, the harness: + +- Starts skit with `SK_AUTH__MODE=enabled` +- Uses an isolated auth state directory under `target/` +- Reads the generated bootstrap token from `admin.token` and exposes it to tests as `E2E_ADMIN_TOKEN` + +Set `E2E_KEEP_AUTH_STATE=1` to keep the temporary auth state directory for debugging. + ## Scripts | Script | Description | diff --git a/e2e/src/harness/run.ts b/e2e/src/harness/run.ts index 6cdf066f..5bfedcc6 100644 --- a/e2e/src/harness/run.ts +++ b/e2e/src/harness/run.ts @@ -19,12 +19,19 @@ import { waitForHealth } from './health'; const ROOT_DIR = path.resolve(import.meta.dirname, '../../..'); const MAX_LOG_BYTES = 256 * 1024; +function isTruthy(value: string | undefined): boolean { + if (!value) return false; + return value === '1' || value.toLowerCase() === 'true' || value.toLowerCase() === 'yes'; +} + interface ServerInfo { process: ChildProcess; baseUrl: string; port: number; stdout: string; stderr: string; + authStateDir?: string; + adminToken?: string; } function appendBounded(buffer: string, chunk: string): string { @@ -35,9 +42,55 @@ function appendBounded(buffer: string, chunk: string): string { return next.slice(next.length - MAX_LOG_BYTES); } +async function waitForFile(filePath: string, timeoutMs: number = 5000): Promise { + const deadline = Date.now() + timeoutMs; + + while (Date.now() < deadline) { + try { + const stat = 
fs.statSync(filePath); + if (stat.isFile() && stat.size > 0) { + return; + } + } catch { + // Ignore and keep polling + } + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + throw new Error(`Timed out waiting for file: ${filePath}`); +} + +async function readAdminToken(stateDir: string): Promise { + const tokenPath = path.join(stateDir, 'admin.token'); + await waitForFile(tokenPath); + const token = fs.readFileSync(tokenPath, 'utf8').trim(); + if (!token) { + throw new Error(`Admin token file is empty: ${tokenPath}`); + } + return token; +} + +function cleanupAuthStateDir(stateDir: string | undefined): void { + if (!stateDir) return; + if (isTruthy(process.env.E2E_KEEP_AUTH_STATE)) return; + + try { + fs.rmSync(stateDir, { recursive: true, force: true }); + } catch { + // Best-effort cleanup + } +} + async function startServer(): Promise { const port = await findFreePort(); const baseUrl = `http://127.0.0.1:${port}`; + const enableAuth = isTruthy(process.env.E2E_AUTH) || process.env.E2E_AUTH_MODE === 'enabled'; + if (enableAuth) { + fs.mkdirSync(path.join(ROOT_DIR, 'target'), { recursive: true }); + } + const authStateDir = enableAuth + ? fs.mkdtempSync(path.join(ROOT_DIR, 'target', 'e2e-auth-')) + : undefined; // Check if UI is built const uiDistPath = path.join(ROOT_DIR, 'ui/dist/index.html'); @@ -63,6 +116,12 @@ async function startServer(): Promise { ...process.env, SK_SERVER__ADDRESS: `127.0.0.1:${port}`, SK_LOG__FILE_ENABLE: 'false', // Avoid writing skit.log + ...(enableAuth + ? 
{ + SK_AUTH__MODE: 'enabled', + SK_AUTH__STATE_DIR: authStateDir, + } + : {}), RUST_LOG: 'warn', }, stdio: ['ignore', 'pipe', 'pipe'], @@ -114,16 +173,18 @@ async function startServer(): Promise { if (trimmedStdout) console.log(`\n[skit stdout]\n${trimmedStdout}\n`); if (trimmedStderr) console.error(`\n[skit stderr]\n${trimmedStderr}\n`); } - await stopServer({ process: serverProcess, baseUrl, port, stdout, stderr }); + await stopServer({ process: serverProcess, baseUrl, port, stdout, stderr, authStateDir }); throw error; } - return { process: serverProcess, baseUrl, port, stdout, stderr }; + const adminToken = enableAuth && authStateDir ? await readAdminToken(authStateDir) : undefined; + return { process: serverProcess, baseUrl, port, stdout, stderr, authStateDir, adminToken }; } function stopServer(serverInfo: ServerInfo): Promise { return new Promise((resolve) => { if (serverInfo.process.killed || serverInfo.process.exitCode !== null) { + cleanupAuthStateDir(serverInfo.authStateDir); resolve(); return; } @@ -132,6 +193,7 @@ function stopServer(serverInfo: ServerInfo): Promise { const onExit = () => { console.log('Server stopped.'); + cleanupAuthStateDir(serverInfo.authStateDir); resolve(); }; @@ -148,13 +210,18 @@ function stopServer(serverInfo: ServerInfo): Promise { if (serverInfo.process.exitCode === null) { console.warn('Server did not exit after SIGKILL; continuing anyway.'); } + cleanupAuthStateDir(serverInfo.authStateDir); resolve(); }, 2000); }, 5000); }); } -async function runPlaywright(baseUrl: string, extraArgs: string[]): Promise { +async function runPlaywright( + baseUrl: string, + extraArgs: string[], + envOverrides: Record = {} +): Promise { return new Promise((resolve) => { const args = ['playwright', 'test', ...extraArgs]; console.log(`Running: bunx ${args.join(' ')}`); @@ -164,6 +231,7 @@ async function runPlaywright(baseUrl: string, extraArgs: string[]): Promise { try { serverInfo = await startServer(); - exitCode = await 
runPlaywright(serverInfo.baseUrl, playwrightArgs); + const envOverrides: Record = {}; + if (serverInfo.adminToken) { + envOverrides.E2E_ADMIN_TOKEN = serverInfo.adminToken; + } + exitCode = await runPlaywright(serverInfo.baseUrl, playwrightArgs, envOverrides); } catch (error) { console.error('Error:', error); exitCode = 1; diff --git a/e2e/tests/auth-helpers.ts b/e2e/tests/auth-helpers.ts new file mode 100644 index 00000000..7f85a148 --- /dev/null +++ b/e2e/tests/auth-helpers.ts @@ -0,0 +1,116 @@ +// SPDX-FileCopyrightText: © 2025 StreamKit Contributors +// +// SPDX-License-Identifier: MPL-2.0 + +import { expect, type Page } from '@playwright/test'; +import * as fs from 'fs'; +import * as path from 'path'; +import { fileURLToPath } from 'url'; + +function readAdminTokenFromStateDir(stateDir: string): string | null { + const tokenPath = path.join(stateDir, 'admin.token'); + try { + const token = fs.readFileSync(tokenPath, 'utf8').trim(); + return token || null; + } catch { + return null; + } +} + +function discoverAdminTokenFromDisk(): string | null { + const stateDirCandidates: string[] = []; + + if (process.env.E2E_AUTH_STATE_DIR) { + stateDirCandidates.push(process.env.E2E_AUTH_STATE_DIR); + } + + if (process.env.SK_AUTH__STATE_DIR) { + stateDirCandidates.push(process.env.SK_AUTH__STATE_DIR); + } + + // Default state dir is ".streamkit/auth" relative to repo root. + // From e2e/tests/*, repo root is two directories up. 
+  const __dirname = path.dirname(fileURLToPath(import.meta.url));
+  const repoRoot = path.resolve(__dirname, '..', '..');
+  stateDirCandidates.push(path.join(repoRoot, '.streamkit', 'auth'));
+
+  for (const stateDir of stateDirCandidates) {
+    const token = readAdminTokenFromStateDir(stateDir);
+    if (token) return token;
+  }
+
+  return null;
+}
+
+export const adminToken =
+  process.env.E2E_ADMIN_TOKEN?.trim() || discoverAdminTokenFromDisk() || null;
+
+export function getAuthHeaders(): Record<string, string> {
+  if (!adminToken) return {};
+  return { Authorization: `Bearer ${adminToken}` };
+}
+
+/**
+ * Logs in via the UI if the login view is currently shown.
+ *
+ * When auth is disabled, clicks "Continue without auth".
+ *
+ * Fails with a clear message when auth is enabled but no admin token is available.
+ */
+export async function ensureLoggedIn(page: Page): Promise<void> {
+  const loginView = page.getByTestId('login-view');
+  const designView = page.getByTestId('design-view');
+  const appViews = [
+    designView,
+    page.getByTestId('monitor-view'),
+    page.getByTestId('convert-view'),
+    page.getByTestId('stream-view'),
+    page.getByTestId('tokens-view'),
+  ];
+
+  // Wait for the app to settle on either:
+  // - the login screen, or
+  // - any primary app view.
+  //
+  // When auth is enabled, the app may briefly show a loading spinner before redirecting to /login.
+  await Promise.race([
+    loginView.waitFor({ state: 'visible', timeout: 30000 }),
+    ...appViews.map((view) => view.waitFor({ state: 'visible', timeout: 30000 })),
+  ]).catch(() => {
+    throw new Error('Timed out waiting for StreamKit UI to show a view (login or app)');
+  });
+
+  // If we're already on an app view (design/monitor/convert/stream/tokens), we're done.
+ if (!(await loginView.isVisible().catch(() => false))) { + return; + } + + const meResponse = await page.request.get('/api/v1/auth/me'); + const meBody = (await meResponse.json()) as { auth_enabled?: boolean; authenticated?: boolean }; + + if (meBody.auth_enabled === false) { + const continueWithoutAuth = page.getByTestId('login-continue-without-auth'); + await expect(continueWithoutAuth).toBeEnabled({ timeout: 20000 }); + await continueWithoutAuth.click(); + await expect(designView).toBeVisible({ timeout: 20000 }); + return; + } + + if (!adminToken) { + throw new Error( + 'Login required but admin token is not available. ' + + 'Set E2E_ADMIN_TOKEN or point E2E_AUTH_STATE_DIR/SK_AUTH__STATE_DIR to a directory containing admin.token.' + ); + } + + // If already authenticated, the app will redirect away from /login automatically. + if (meBody.authenticated === true) { + await expect(designView).toBeVisible({ timeout: 20000 }); + return; + } + + await page.getByTestId('login-token-input').fill(adminToken); + await page.getByTestId('login-submit').click(); + + await expect(designView).toBeVisible({ timeout: 30000 }); +} diff --git a/e2e/tests/auth.spec.ts b/e2e/tests/auth.spec.ts new file mode 100644 index 00000000..437542d1 --- /dev/null +++ b/e2e/tests/auth.spec.ts @@ -0,0 +1,55 @@ +// SPDX-FileCopyrightText: © 2025 StreamKit Contributors +// +// SPDX-License-Identifier: MPL-2.0 + +import { test, expect } from '@playwright/test'; + +import { adminToken, ensureLoggedIn } from './auth-helpers'; + +test.describe('Auth Flow', () => { + test('requires auth for API and redirects UI to login', async ({ page }) => { + const me = await page.request.get('/api/v1/auth/me'); + const meBody = (await me.json()) as { auth_enabled: boolean }; + test.skip(!meBody.auth_enabled, 'Auth is disabled for this server'); + test.skip( + !adminToken, + 'Admin token not available (set E2E_ADMIN_TOKEN or generate admin.token)' + ); + + const unauthenticatedList = await 
page.request.get('/api/v1/sessions'); + expect(unauthenticatedList.status()).toBe(401); + + await page.goto('/design'); + await expect(page.getByTestId('login-view')).toBeVisible(); + }); + + test('signs in with token, grants cookie access, and supports logout', async ({ page }) => { + const me = await page.request.get('/api/v1/auth/me'); + const meBody = (await me.json()) as { auth_enabled: boolean }; + test.skip(!meBody.auth_enabled, 'Auth is disabled for this server'); + test.skip( + !adminToken, + 'Admin token not available (set E2E_ADMIN_TOKEN or generate admin.token)' + ); + + await page.goto('/design'); + await expect(page.getByTestId('login-view')).toBeVisible(); + + await ensureLoggedIn(page); + + await expect(page.getByTestId('design-view')).toBeVisible(); + await expect(page.getByRole('link', { name: 'Admin' })).toBeVisible(); + + const authenticatedList = await page.request.get('/api/v1/sessions'); + expect(authenticatedList.ok()).toBeTruthy(); + + await page.goto('/admin/tokens'); + await expect(page.getByTestId('tokens-view')).toBeVisible(); + + await page.getByTestId('tokens-logout').click(); + await expect(page.getByTestId('login-view')).toBeVisible(); + + const loggedOutList = await page.request.get('/api/v1/sessions'); + expect(loggedOutList.status()).toBe(401); + }); +}); diff --git a/e2e/tests/design.spec.ts b/e2e/tests/design.spec.ts index e45dd9a6..c07f8709 100644 --- a/e2e/tests/design.spec.ts +++ b/e2e/tests/design.spec.ts @@ -4,9 +4,12 @@ import { test, expect } from '@playwright/test'; +import { ensureLoggedIn } from './auth-helpers'; + test.describe('Design View', () => { test.beforeEach(async ({ page }) => { await page.goto('/design'); + await ensureLoggedIn(page); // Wait for the design view to load await expect(page.getByTestId('design-view')).toBeVisible(); }); diff --git a/e2e/tests/monitor.spec.ts b/e2e/tests/monitor.spec.ts index 92b45a19..d0905e10 100644 --- a/e2e/tests/monitor.spec.ts +++ b/e2e/tests/monitor.spec.ts @@ -4,6 +4,8 
@@ import { test, expect, request } from '@playwright/test'; +import { ensureLoggedIn, getAuthHeaders } from './auth-helpers'; + test.describe('Monitor View - Session Lifecycle', () => { // Unique session name for this test run const testSessionName = `e2e-test-session-${Date.now()}`; @@ -21,6 +23,10 @@ steps: test.beforeEach(async ({ page }) => { await page.goto('/monitor'); + await ensureLoggedIn(page); + if (!page.url().includes('/monitor')) { + await page.goto('/monitor'); + } await expect(page.getByTestId('monitor-view')).toBeVisible(); }); @@ -28,7 +34,10 @@ steps: page, baseURL, }) => { - const apiContext = await request.newContext({ baseURL: baseURL! }); + const apiContext = await request.newContext({ + baseURL: baseURL!, + extraHTTPHeaders: getAuthHeaders(), + }); try { // Step 1: Create session via API @@ -80,7 +89,10 @@ steps: // Cleanup: ensure session is deleted even if test fails if (sessionId) { try { - const apiContext = await request.newContext({ baseURL: baseURL! }); + const apiContext = await request.newContext({ + baseURL: baseURL!, + extraHTTPHeaders: getAuthHeaders(), + }); await apiContext.delete(`/api/v1/sessions/${sessionId}`); await apiContext.dispose(); } catch { diff --git a/justfile b/justfile index 9c1661de..3203d93f 100644 --- a/justfile +++ b/justfile @@ -1001,6 +1001,18 @@ e2e-headed: build-ui install-e2e @cargo build -p streamkit-server --bin skit @cd e2e && bun run test:headed +# Run E2E tests with built-in auth enabled +e2e-auth: build-ui install-e2e + @echo "Building skit (debug)..." + @cargo build -p streamkit-server --bin skit + @echo "Running E2E tests (auth enabled)..." + @cd e2e && E2E_AUTH=1 bun run test + +# Run E2E tests with built-in auth enabled + headed browser +e2e-auth-headed: build-ui install-e2e + @cargo build -p streamkit-server --bin skit + @cd e2e && E2E_AUTH=1 bun run test:headed + # Run E2E against external server e2e-external url: @echo "Running E2E tests against {{url}}..." 
diff --git a/samples/loadtest/pipelines/moq_broadcaster.yml b/samples/loadtest/pipelines/moq_broadcaster.yml index 2919265e..920915a8 100644 --- a/samples/loadtest/pipelines/moq_broadcaster.yml +++ b/samples/loadtest/pipelines/moq_broadcaster.yml @@ -24,5 +24,7 @@ nodes: kind: transport::moq::publisher params: url: http://localhost:4443 + # Optional: JWT for authenticated relays (appended as `?jwt=...`). + # jwt: "" broadcast: input needs: pacer diff --git a/samples/loadtest/pipelines/moq_mixing_selfcontained.yml b/samples/loadtest/pipelines/moq_mixing_selfcontained.yml index 9d5b159e..59701a77 100644 --- a/samples/loadtest/pipelines/moq_mixing_selfcontained.yml +++ b/samples/loadtest/pipelines/moq_mixing_selfcontained.yml @@ -27,6 +27,8 @@ nodes: params: broadcast: input url: http://localhost:4443 + # Optional: JWT for authenticated relays (appended as `?jwt=...`). + # jwt: "" needs: mic_pacer # Subscriber consumes the mic broadcast from MoQ (same as UI pipeline). @@ -37,6 +39,8 @@ nodes: broadcast: input max_retries: 5 url: http://localhost:4443 + # Optional: JWT for authenticated relays (appended as `?jwt=...`). + # jwt: "" # Music bed (file-based). music_file_reader: @@ -91,4 +95,6 @@ nodes: params: broadcast: output url: http://localhost:4443 + # Optional: JWT for authenticated relays (appended as `?jwt=...`). + # jwt: "" needs: encoder diff --git a/samples/loadtest/pipelines/moq_selfcontained.yml b/samples/loadtest/pipelines/moq_selfcontained.yml index f0172bd1..d79d83a3 100644 --- a/samples/loadtest/pipelines/moq_selfcontained.yml +++ b/samples/loadtest/pipelines/moq_selfcontained.yml @@ -25,6 +25,8 @@ nodes: kind: transport::moq::publisher params: url: http://localhost:4443 + # Optional: JWT for authenticated relays (appended as `?jwt=...`). + # jwt: "" broadcast: input needs: pacer @@ -32,6 +34,8 @@ nodes: kind: transport::moq::subscriber params: url: http://localhost:4443 + # Optional: JWT for authenticated relays (appended as `?jwt=...`). 
+ # jwt: "" broadcast: input decoder: kind: audio::opus::decoder @@ -48,6 +52,8 @@ nodes: kind: transport::moq::publisher params: url: http://localhost:4443 + # Optional: JWT for authenticated relays (appended as `?jwt=...`). + # jwt: "" broadcast: output needs: encoder name: MoQ Transcoder (Self-Contained) diff --git a/samples/loadtest/pipelines/moq_subscriber_transcode.yml b/samples/loadtest/pipelines/moq_subscriber_transcode.yml index 93666550..79174671 100644 --- a/samples/loadtest/pipelines/moq_subscriber_transcode.yml +++ b/samples/loadtest/pipelines/moq_subscriber_transcode.yml @@ -10,6 +10,8 @@ nodes: kind: transport::moq::subscriber params: url: http://localhost:4443 + # Optional: JWT for authenticated relays (appended as `?jwt=...`). + # jwt: "" broadcast: input decode: @@ -30,5 +32,7 @@ nodes: kind: transport::moq::publisher params: url: http://localhost:4443 + # Optional: JWT for authenticated relays (appended as `?jwt=...`). + # jwt: "" broadcast: output needs: encode diff --git a/samples/skit.toml b/samples/skit.toml index c5fa8b51..b6a1e139 100644 --- a/samples/skit.toml +++ b/samples/skit.toml @@ -66,6 +66,33 @@ allowed_origins = [ # "http://localhost:*", # Keep for development # ] +[auth] +# Built-in JWT authentication for: +# - HTTP API + Web UI (cookie or Authorization header) +# - WebSocket control plane +# - MoQ/WebTransport (via MoQ tokens; passed as ?jwt=) +# +# Modes: +# - "auto" (default): disabled on loopback binds, enabled on non-loopback binds +# - "enabled": always require auth +# - "disabled": never require auth (NOT recommended outside localhost) +mode = "auto" + +# Directory for auth state (keys, token metadata, revocations). +# Persist/mount this path in Docker deployments. +# state_dir = ".streamkit/auth" + +# Cookie name used by the Web UI (HttpOnly). +# cookie_name = "skit_session" + +# API token TTL defaults (seconds). 
+# api_default_ttl_secs = 86400 # 24 hours +# api_max_ttl_secs = 2592000 # 30 days + +# MoQ token TTL defaults (seconds). +# moq_default_ttl_secs = 3600 # 1 hour +# moq_max_ttl_secs = 86400 # 1 day + [log] # Enable console logging (stdout/stderr) console_enable = true @@ -270,8 +297,8 @@ enabled = false # fallback_params = { model_dir = "models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17", language = "auto", num_threads = 4, execution_provider = "cpu" } [permissions] -# Default role for unauthenticated requests -# Options: "admin", "user", or any custom role defined below +# Default role for unauthenticated requests (only when built-in auth is disabled) +# Options: "admin", "user", "viewer", or any custom role defined below default_role = "admin" # Optional trusted HTTP header used to select a role (e.g. "x-role") @@ -295,7 +322,7 @@ default_role = "admin" # # SECURITY NOTE: When defining roles in TOML, set boolean fields explicitly. # Some booleans default to `true` when omitted. -# Built-in roles (admin, user) are defined in code with appropriate permissions. +# Built-in roles (admin, user, viewer) are defined in code with appropriate permissions. # Custom roles defined here override the built-in ones. 
[permissions.roles.admin] diff --git a/ui/src/App.tsx b/ui/src/App.tsx index 88f8b28f..93298f2c 100644 --- a/ui/src/App.tsx +++ b/ui/src/App.tsx @@ -12,14 +12,17 @@ import { TooltipProvider } from './components/Tooltip'; import { ThemeProvider } from './context/ThemeContext'; import { ToastProvider } from './context/ToastContext'; import Layout from './Layout'; +import { fetchAuthMe } from './services/auth'; import { initializePermissions } from './services/permissions'; import { ensureSchemasLoaded } from './stores/schemaStore'; import { getBasePathname } from './utils/baseHref'; import { getLogger } from './utils/logger'; import ConvertView from './views/ConvertView'; import DesignView from './views/DesignView'; +import LoginView from './views/LoginView'; import MonitorView from './views/MonitorView'; import StreamView from './views/StreamView'; +import TokensView from './views/TokensView'; const logger = getLogger('App'); @@ -35,23 +38,46 @@ const queryClient = new QueryClient({ }); const App: React.FC = () => { - const [schemasLoaded, setSchemasLoaded] = useState(false); + const [appReady, setAppReady] = useState(false); + const [requiresLogin, setRequiresLogin] = useState(false); useEffect(() => { - // Initialize permissions and schemas in parallel - Promise.all([ - initializePermissions().catch((err) => { - logger.error('Failed to initialize permissions:', err); - }), - ensureSchemasLoaded().catch((err) => { - logger.error('Failed to load schemas on startup:', err); - }), - ]).finally(() => { - setSchemasLoaded(true); - }); + let cancelled = false; + + (async () => { + try { + const me = await fetchAuthMe(); + if (cancelled) return; + + if (me.auth_enabled && !me.authenticated) { + setRequiresLogin(true); + setAppReady(true); + return; + } + } catch (err) { + logger.error('Failed to check auth status:', err); + } + + await Promise.all([ + initializePermissions().catch((err) => { + logger.error('Failed to initialize permissions:', err); + }), + 
ensureSchemasLoaded().catch((err) => { + logger.error('Failed to load schemas on startup:', err); + }), + ]); + + if (cancelled) return; + setRequiresLogin(false); + setAppReady(true); + })(); + + return () => { + cancelled = true; + }; }, []); - if (!schemasLoaded) { + if (!appReady) { return (
{ - }> + setRequiresLogin(false)} />} + /> + : } + > } /> } /> } /> } /> } /> + } /> diff --git a/ui/src/Layout.tsx b/ui/src/Layout.tsx index 79048481..9752b030 100644 --- a/ui/src/Layout.tsx +++ b/ui/src/Layout.tsx @@ -13,6 +13,7 @@ import { LayoutPresetButtons } from './components/LayoutPresetButtons'; import { Button } from './components/ui/Button'; import { useTheme, type ColorMode } from './context/ThemeContext'; import { LAYOUT_PRESETS, useLayoutStore, type LayoutPreset } from './stores/layoutStore'; +import { usePermissionStore } from './stores/permissionStore'; const LayoutContainer = styled.div` display: flex; @@ -184,10 +185,13 @@ const ItemLabel = styled.span` const Main = styled.main` flex: 1; overflow: hidden; + min-width: 0; + min-height: 0; `; const Layout: React.FC = () => { const { colorMode, setColorMode } = useTheme(); + const role = usePermissionStore((s) => s.role); const { currentPreset, setPreset } = useLayoutStore( useShallow((state) => ({ currentPreset: state.currentPreset, @@ -212,6 +216,7 @@ const Layout: React.FC = () => { Monitor Convert Stream + {role === 'admin' && Admin} diff --git a/ui/src/hooks/useSession.ts b/ui/src/hooks/useSession.ts index 8fabf276..f2cbe4a1 100644 --- a/ui/src/hooks/useSession.ts +++ b/ui/src/hooks/useSession.ts @@ -7,7 +7,7 @@ import { useEffect, useCallback } from 'react'; import { v4 as uuidv4 } from 'uuid'; import { useShallow } from 'zustand/shallow'; -import { getApiUrl } from '@/services/base'; +import { fetchApi } from '@/services/base'; import { getWebSocketService } from '@/services/websocket'; import { useNodeParamsStore } from '@/stores/nodeParamsStore'; import { useSessionStore } from '@/stores/sessionStore'; @@ -16,8 +16,7 @@ import type { Pipeline, NodeState, Request, MessageType, BatchOperation } from ' const EMPTY_NODE_STATES: Record = Object.freeze({}); async function fetchPipeline(sessionId: string): Promise { - const apiUrl = getApiUrl(); - const response = await 
fetch(`${apiUrl}/api/v1/sessions/${sessionId}/pipeline`); + const response = await fetchApi(`/api/v1/sessions/${sessionId}/pipeline`); if (!response.ok) { throw new Error(`Failed to fetch pipeline: ${response.statusText}`); } diff --git a/ui/src/hooks/useSessionsPrefetch.ts b/ui/src/hooks/useSessionsPrefetch.ts index e5927581..a9229b0b 100644 --- a/ui/src/hooks/useSessionsPrefetch.ts +++ b/ui/src/hooks/useSessionsPrefetch.ts @@ -5,13 +5,12 @@ import { useQuery } from '@tanstack/react-query'; import { useEffect } from 'react'; -import { getApiUrl } from '@/services/base'; +import { fetchApi } from '@/services/base'; import { useSessionStore } from '@/stores/sessionStore'; import type { Pipeline, SessionInfo } from '@/types/types'; async function fetchPipeline(sessionId: string): Promise { - const apiUrl = getApiUrl(); - const response = await fetch(`${apiUrl}/api/v1/sessions/${sessionId}/pipeline`); + const response = await fetchApi(`/api/v1/sessions/${sessionId}/pipeline`); if (!response.ok) { throw new Error(`Failed to fetch pipeline: ${response.statusText}`); } diff --git a/ui/src/services/assets.ts b/ui/src/services/assets.ts index 0345f8e1..0be53be9 100644 --- a/ui/src/services/assets.ts +++ b/ui/src/services/assets.ts @@ -11,7 +11,7 @@ import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query'; import type { AudioAsset } from '@/types/generated/api-types'; import { getLogger } from '@/utils/logger'; -import { getApiUrl } from './base'; +import { fetchApi } from './base'; const logger = getLogger('assets'); @@ -20,12 +20,9 @@ const logger = getLogger('assets'); * @returns A promise that resolves to an array of audio assets */ export async function listAudioAssets(): Promise { - const apiUrl = getApiUrl(); - const endpoint = `${apiUrl}/api/v1/assets/audio`; + logger.info('Fetching audio assets'); - logger.info('Fetching audio assets from:', endpoint); - - const response = await fetch(endpoint, { + const response = await 
fetchApi('/api/v1/assets/audio', { method: 'GET', headers: { 'Content-Type': 'application/json', @@ -54,15 +51,12 @@ export async function listAudioAssets(): Promise { * @returns A promise that resolves to the created audio asset */ export async function uploadAudioAsset(file: File): Promise { - const apiUrl = getApiUrl(); - const endpoint = `${apiUrl}/api/v1/assets/audio`; - logger.info('Uploading audio asset:', file.name); const formData = new FormData(); formData.append('file', file); - const response = await fetch(endpoint, { + const response = await fetchApi('/api/v1/assets/audio', { method: 'POST', body: formData, }); @@ -90,12 +84,9 @@ export async function uploadAudioAsset(file: File): Promise { * @returns A promise that resolves when the asset is deleted */ export async function deleteAudioAsset(id: string): Promise { - const apiUrl = getApiUrl(); - const endpoint = `${apiUrl}/api/v1/assets/audio/${encodeURIComponent(id)}`; - logger.info('Deleting audio asset:', id); - const response = await fetch(endpoint, { + const response = await fetchApi(`/api/v1/assets/audio/${encodeURIComponent(id)}`, { method: 'DELETE', }); diff --git a/ui/src/services/auth.ts b/ui/src/services/auth.ts new file mode 100644 index 00000000..8f6ae336 --- /dev/null +++ b/ui/src/services/auth.ts @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: © 2025 StreamKit Contributors +// +// SPDX-License-Identifier: MPL-2.0 + +import { fetchApi } from './base'; + +export interface AuthMeResponse { + authenticated: boolean; + auth_enabled: boolean; + role: string | null; + jti: string | null; +} + +export interface CreateTokenResponse { + token: string; + jti: string; + exp: number; + url_template?: string; +} + +export interface TokenInfo { + jti: string; + token_type: string; + role: string | null; + label: string | null; + created_at: number; + exp: number; + revoked: boolean; + created_by: string; +} + +export interface CreateApiTokenRequest { + role: string; + label?: string; + ttl_secs?: number; 
+} + +export interface CreateMoqTokenRequest { + root: string; + subscribe?: string[]; + publish?: string[]; + label?: string; + ttl_secs?: number; +} + +export async function fetchAuthMe(): Promise { + const response = await fetchApi('/api/v1/auth/me', { method: 'GET' }); + if (!response.ok) { + throw new Error(`Failed to fetch auth status: ${response.status} ${response.statusText}`); + } + return response.json() as Promise; +} + +export async function loginWithToken(token: string): Promise { + const response = await fetchApi('/api/v1/auth/login', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ token }), + }); + + if (!response.ok) { + const errorText = await response.text(); + throw new Error(errorText || `Login failed: ${response.status} ${response.statusText}`); + } +} + +export async function logout(): Promise { + const response = await fetchApi('/api/v1/auth/logout', { method: 'POST' }); + if (!response.ok) { + const errorText = await response.text(); + throw new Error(errorText || `Logout failed: ${response.status} ${response.statusText}`); + } +} + +export async function listTokens(): Promise { + const response = await fetchApi('/api/v1/auth/tokens', { method: 'GET' }); + if (!response.ok) { + const errorText = await response.text(); + throw new Error( + errorText || `Failed to list tokens: ${response.status} ${response.statusText}` + ); + } + return response.json() as Promise; +} + +export async function createApiToken(req: CreateApiTokenRequest): Promise { + const response = await fetchApi('/api/v1/auth/tokens', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(req), + }); + if (!response.ok) { + const errorText = await response.text(); + throw new Error( + errorText || `Failed to create API token: ${response.status} ${response.statusText}` + ); + } + return response.json() as Promise; +} + +export async function revokeToken(jti: string): Promise { + const response 
= await fetchApi(`/api/v1/auth/tokens/${encodeURIComponent(jti)}`, { + method: 'DELETE', + }); + if (!response.ok) { + const errorText = await response.text(); + throw new Error( + errorText || `Failed to revoke token: ${response.status} ${response.statusText}` + ); + } +} + +export async function createMoqToken(req: CreateMoqTokenRequest): Promise { + const response = await fetchApi('/api/v1/auth/moq-tokens', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + ...req, + subscribe: req.subscribe ?? [], + publish: req.publish ?? [], + }), + }); + if (!response.ok) { + const errorText = await response.text(); + throw new Error( + errorText || `Failed to create MoQ token: ${response.status} ${response.statusText}` + ); + } + return response.json() as Promise; +} diff --git a/ui/src/services/base.ts b/ui/src/services/base.ts index 4d2ba1fb..d0c91757 100644 --- a/ui/src/services/base.ts +++ b/ui/src/services/base.ts @@ -6,7 +6,7 @@ * Base service utilities shared across all service modules */ -import { getBaseHrefWithoutTrailingSlash } from '../utils/baseHref'; +import { getBaseHrefWithoutTrailingSlash, getBasePathname } from '../utils/baseHref'; /** * Gets the API base URL (handles both dev and production) @@ -23,6 +23,24 @@ export function getApiUrl(): string { // In production, VITE_API_BASE is undefined, so we fall through to tag logic const apiBase = import.meta.env.VITE_API_BASE; if (apiBase !== undefined) { + // Cookie auth uses `SameSite=Strict`, which requires the UI and API to be the same "site" + // (scheme + registrable domain), not necessarily the same origin. + // + // In local development it's common to mix `localhost` and `127.0.0.1`. Those are treated as + // different sites by browsers, which breaks cookie-based auth flows. When both sides are + // loopback, rewrite the API hostname to match the current UI hostname. 
+ try { + const url = new URL(apiBase); + const isLoopback = (host: string) => host === 'localhost' || host === '127.0.0.1'; + + if (isLoopback(url.hostname) && isLoopback(window.location.hostname)) { + url.hostname = window.location.hostname; + return url.toString().replace(/\/$/, ''); + } + } catch { + // If parsing fails, fall back to the raw value. + } + return apiBase; } @@ -32,3 +50,33 @@ export function getApiUrl(): string { // No base tag - use origin for root deployment return window.location.origin; } + +function ensureLeadingSlash(path: string): string { + return path.startsWith('/') ? path : `/${path}`; +} + +/** + * Fetch helper for StreamKit API calls. + * + * - Always sets `credentials: 'include'` so cookie auth works in dev (cross-origin) + * - Redirects to `/login` on 401 (except when already on the login route) + */ +export async function fetchApi(path: string, options: RequestInit = {}): Promise { + const apiUrl = getApiUrl(); + const url = `${apiUrl}${ensureLeadingSlash(path)}`; + + const response = await fetch(url, { + ...options, + credentials: 'include', + }); + + if (response.status === 401) { + const basePathname = getBasePathname(); + const loginPath = `${basePathname}/login`; + if (window.location.pathname !== loginPath) { + window.location.assign(loginPath); + } + } + + return response; +} diff --git a/ui/src/services/config.ts b/ui/src/services/config.ts index 790913cf..8a3c1bcb 100644 --- a/ui/src/services/config.ts +++ b/ui/src/services/config.ts @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: MPL-2.0 -import { getApiUrl } from './base'; +import { fetchApi } from './base'; /** * Frontend configuration fetched from the server @@ -15,8 +15,7 @@ export interface FrontendConfig { * Fetch frontend configuration from the server */ export async function fetchConfig(): Promise { - const apiUrl = getApiUrl(); - const response = await fetch(`${apiUrl}/api/v1/config`); + const response = await fetchApi('/api/v1/config'); if (!response.ok) { throw 
new Error(`Failed to fetch config: ${response.statusText}`); diff --git a/ui/src/services/converter.test.ts b/ui/src/services/converter.test.ts index 5a0504e8..95cadfa0 100644 --- a/ui/src/services/converter.test.ts +++ b/ui/src/services/converter.test.ts @@ -29,6 +29,10 @@ vi.mock('@/utils/logger', () => ({ vi.mock('./base', () => ({ getApiUrl: () => 'http://localhost:4545', + fetchApi: (path: string, options: RequestInit = {}) => { + const normalized = path.startsWith('/') ? path : `/${path}`; + return fetch(`http://localhost:4545${normalized}`, { ...options, credentials: 'include' }); + }, })); describe('converter service', () => { diff --git a/ui/src/services/converter.ts b/ui/src/services/converter.ts index b7aedf8f..b319bc96 100644 --- a/ui/src/services/converter.ts +++ b/ui/src/services/converter.ts @@ -9,7 +9,7 @@ import { getLogger } from '@/utils/logger'; import { canUseMseForMimeType } from '@/utils/mse'; -import { getApiUrl } from './base'; +import { fetchApi } from './base'; const logger = getLogger('converter'); @@ -201,18 +201,14 @@ export async function convertFile( } // Determine the API URL - const apiUrl = getApiUrl(); - const processEndpoint = `${apiUrl}/api/v1/process`; - logger.info('Starting conversion:', { - endpoint: processEndpoint, fileName: mediaFile?.name || '(asset-based)', fileSize: mediaFile?.size || 0, pipelineLength: pipelineYaml.length, }); // Make the request - const response = await fetch(processEndpoint, { + const response = await fetchApi('/api/v1/process', { method: 'POST', body: formData, signal, diff --git a/ui/src/services/permissions.test.ts b/ui/src/services/permissions.test.ts index 2aefe492..7bfffa97 100644 --- a/ui/src/services/permissions.test.ts +++ b/ui/src/services/permissions.test.ts @@ -12,6 +12,10 @@ import { initializePermissions } from './permissions'; // Mock getApiUrl to return a consistent test URL vi.mock('./base', () => ({ getApiUrl: () => 'http://localhost:4545', + fetchApi: (path: string, options: 
RequestInit = {}) => { + const normalized = path.startsWith('/') ? path : `/${path}`; + return fetch(`http://localhost:4545${normalized}`, { ...options, credentials: 'include' }); + }, })); const DENY_ALL_PERMISSIONS = { @@ -121,6 +125,7 @@ describe('permissions', () => { headers: { 'Content-Type': 'application/json', }, + credentials: 'include', }); }); diff --git a/ui/src/services/permissions.ts b/ui/src/services/permissions.ts index ee0be600..d487e18b 100644 --- a/ui/src/services/permissions.ts +++ b/ui/src/services/permissions.ts @@ -6,7 +6,7 @@ import { usePermissionStore, type Permissions } from '@/stores/permissionStore'; import type { PermissionsInfo } from '@/types/generated/api-types'; import { getLogger } from '@/utils/logger'; -import { getApiUrl } from './base'; +import { fetchApi } from './base'; const logger = getLogger('permissions'); @@ -46,8 +46,7 @@ function convertPermissions(apiPerms: PermissionsInfo): Permissions { */ export async function initializePermissions(): Promise { try { - const apiUrl = getApiUrl(); - const response = await fetch(`${apiUrl}/api/v1/permissions`, { + const response = await fetchApi('/api/v1/permissions', { method: 'GET', headers: { 'Content-Type': 'application/json', diff --git a/ui/src/services/plugins.ts b/ui/src/services/plugins.ts index 864576b4..07eb312b 100644 --- a/ui/src/services/plugins.ts +++ b/ui/src/services/plugins.ts @@ -4,14 +4,13 @@ import type { PluginSummary } from '@/types/types'; -import { getApiUrl } from './base'; +import { fetchApi } from './base'; export async function uploadPlugin(file: File): Promise { const formData = new FormData(); formData.append('plugin', file, file.name); - const apiUrl = getApiUrl(); - const response = await fetch(`${apiUrl}/api/v1/plugins`, { + const response = await fetchApi('/api/v1/plugins', { method: 'POST', body: formData, }); @@ -28,9 +27,8 @@ export async function deletePlugin( kind: string, options?: { keepFile?: boolean } ): Promise { - const apiUrl = 
getApiUrl(); const query = options?.keepFile ? '?keep_file=true' : ''; - const response = await fetch(`${apiUrl}/api/v1/plugins/${encodeURIComponent(kind)}${query}`, { + const response = await fetchApi(`/api/v1/plugins/${encodeURIComponent(kind)}${query}`, { method: 'DELETE', }); diff --git a/ui/src/services/samples.ts b/ui/src/services/samples.ts index 51980da3..93c6fd4f 100644 --- a/ui/src/services/samples.ts +++ b/ui/src/services/samples.ts @@ -9,7 +9,7 @@ import type { SamplePipeline, SavePipelineRequest } from '@/types/generated/api-types'; import { getLogger } from '@/utils/logger'; -import { getApiUrl } from './base'; +import { fetchApi } from './base'; const logger = getLogger('samples'); @@ -18,12 +18,9 @@ const logger = getLogger('samples'); * @returns A promise that resolves to an array of sample pipelines */ export async function listSamples(): Promise { - const apiUrl = getApiUrl(); - const endpoint = `${apiUrl}/api/v1/samples/oneshot`; + logger.info('Fetching sample pipelines'); - logger.info('Fetching sample pipelines from:', endpoint); - - const response = await fetch(endpoint, { + const response = await fetchApi('/api/v1/samples/oneshot', { method: 'GET', headers: { 'Content-Type': 'application/json', @@ -68,12 +65,9 @@ export async function listAllSamples(): Promise { * @returns A promise that resolves to an array of dynamic sample pipelines */ export async function listDynamicSamples(): Promise { - const apiUrl = getApiUrl(); - const endpoint = `${apiUrl}/api/v1/samples/dynamic`; - - logger.info('Fetching dynamic sample pipelines from:', endpoint); + logger.info('Fetching dynamic sample pipelines'); - const response = await fetch(endpoint, { + const response = await fetchApi('/api/v1/samples/dynamic', { method: 'GET', headers: { 'Content-Type': 'application/json', @@ -102,12 +96,9 @@ export async function listDynamicSamples(): Promise { * @returns A promise that resolves to the sample pipeline */ export async function getSample(id: string): 
Promise { - const apiUrl = getApiUrl(); - const endpoint = `${apiUrl}/api/v1/samples/oneshot/${encodeURIComponent(id)}`; - logger.info('Fetching sample pipeline:', id); - const response = await fetch(endpoint, { + const response = await fetchApi(`/api/v1/samples/oneshot/${encodeURIComponent(id)}`, { method: 'GET', headers: { 'Content-Type': 'application/json', @@ -137,12 +128,9 @@ export async function getSample(id: string): Promise { * @returns A promise that resolves to the created sample pipeline */ export async function saveSample(request: SavePipelineRequest): Promise { - const apiUrl = getApiUrl(); - const endpoint = `${apiUrl}/api/v1/samples/oneshot`; - logger.info('Saving user pipeline:', request.name); - const response = await fetch(endpoint, { + const response = await fetchApi('/api/v1/samples/oneshot', { method: 'POST', headers: { 'Content-Type': 'application/json', @@ -175,12 +163,9 @@ export async function saveSample(request: SavePipelineRequest): Promise { - const apiUrl = getApiUrl(); - const endpoint = `${apiUrl}/api/v1/samples/oneshot/${encodeURIComponent(id)}`; - logger.info('Deleting user pipeline:', id); - const response = await fetch(endpoint, { + const response = await fetchApi(`/api/v1/samples/oneshot/${encodeURIComponent(id)}`, { method: 'DELETE', }); diff --git a/ui/src/services/sessions.ts b/ui/src/services/sessions.ts index a45de798..6d4c7927 100644 --- a/ui/src/services/sessions.ts +++ b/ui/src/services/sessions.ts @@ -9,7 +9,7 @@ import type { SessionInfo } from '@/types/types'; import { getLogger } from '@/utils/logger'; -import { getApiUrl } from './base'; +import { fetchApi } from './base'; const logger = getLogger('sessions'); @@ -29,10 +29,7 @@ interface CreateSessionResponse { * @returns A promise that resolves to an array of sessions */ export async function listSessions(): Promise { - const apiUrl = getApiUrl(); - const endpoint = `${apiUrl}/api/v1/sessions`; - - const response = await fetch(endpoint, { + const response = await 
fetchApi('/api/v1/sessions', { method: 'GET', headers: { 'Content-Type': 'application/json', @@ -56,9 +53,6 @@ export async function createSession( name: string | null, yaml: string ): Promise { - const apiUrl = getApiUrl(); - const endpoint = `${apiUrl}/api/v1/sessions`; - logger.info('Creating session:', name || '(unnamed)'); const request: CreateSessionRequest = { @@ -66,7 +60,7 @@ export async function createSession( yaml, }; - const response = await fetch(endpoint, { + const response = await fetchApi('/api/v1/sessions', { method: 'POST', headers: { 'Content-Type': 'application/json', diff --git a/ui/src/services/websocket.ts b/ui/src/services/websocket.ts index b146a9eb..6ffaa1af 100644 --- a/ui/src/services/websocket.ts +++ b/ui/src/services/websocket.ts @@ -441,7 +441,24 @@ export function getWebSocketService(): WebSocketService { const devWsUrl = import.meta.env.VITE_WS_URL; const wsUrl = - devWsUrl || + (devWsUrl + ? (() => { + // Keep cookie-based auth working in dev when mixing localhost and 127.0.0.1. + // Cookies are keyed by hostname; if the UI is on localhost but the WS URL uses + // 127.0.0.1 (or vice-versa), the session cookie won't be sent. + try { + const url = new URL(devWsUrl); + const isLoopback = (host: string) => host === 'localhost' || host === '127.0.0.1'; + if (isLoopback(url.hostname) && isLoopback(window.location.hostname)) { + url.hostname = window.location.hostname; + return url.toString(); + } + } catch { + // Ignore and fall back to the raw value. + } + return devWsUrl; + })() + : undefined) || (() => { // Fallback for production: check for tag to handle subpath deployments const protocol = window.location.protocol === 'https:' ? 
'wss:' : 'ws:'; diff --git a/ui/src/stores/pluginStore.ts b/ui/src/stores/pluginStore.ts index 7090670c..50649990 100644 --- a/ui/src/stores/pluginStore.ts +++ b/ui/src/stores/pluginStore.ts @@ -4,7 +4,7 @@ import { create } from 'zustand'; -import { getApiUrl } from '@/services/base'; +import { fetchApi } from '@/services/base'; import type { PluginSummary } from '@/types/types'; type PluginState = { @@ -39,8 +39,7 @@ export function ensurePluginsLoaded(): Promise { if (isLoaded) return Promise.resolve(); if (inFlight) return inFlight; - const apiUrl = getApiUrl(); - inFlight = fetch(`${apiUrl}/api/v1/plugins`) + inFlight = fetchApi('/api/v1/plugins') .then((res) => { if (!res.ok) { throw new Error(`Failed to fetch plugins: ${res.status} ${res.statusText}`); @@ -60,8 +59,7 @@ export function ensurePluginsLoaded(): Promise { } export async function reloadPlugins(): Promise { - const apiUrl = getApiUrl(); - const res = await fetch(`${apiUrl}/api/v1/plugins`); + const res = await fetchApi('/api/v1/plugins'); if (!res.ok) { throw new Error(`Failed to fetch plugins: ${res.status} ${res.statusText}`); } diff --git a/ui/src/stores/schemaStore.ts b/ui/src/stores/schemaStore.ts index 5340cfa3..5fab9c19 100644 --- a/ui/src/stores/schemaStore.ts +++ b/ui/src/stores/schemaStore.ts @@ -4,7 +4,7 @@ import { create } from 'zustand'; -import { getApiUrl } from '@/services/base'; +import { fetchApi } from '@/services/base'; import type { PacketTypeMeta } from '@/types/generated/api-types'; import type { NodeDefinition } from '@/types/types'; @@ -36,10 +36,9 @@ export function ensureSchemasLoaded(): Promise { if (inFlight) return inFlight; inFlight = (async () => { - const apiUrl = getApiUrl(); const [typesRes, nodesRes] = await Promise.all([ - fetch(`${apiUrl}/api/v1/schema/packets`), - fetch(`${apiUrl}/api/v1/schema/nodes`), + fetchApi('/api/v1/schema/packets'), + fetchApi('/api/v1/schema/nodes'), ]); if (!typesRes.ok) { @@ -66,10 +65,9 @@ export function ensureSchemasLoaded(): 
Promise { } export async function reloadSchemas(): Promise { - const apiUrl = getApiUrl(); const [typesRes, nodesRes] = await Promise.all([ - fetch(`${apiUrl}/api/v1/schema/packets`), - fetch(`${apiUrl}/api/v1/schema/nodes`), + fetchApi('/api/v1/schema/packets'), + fetchApi('/api/v1/schema/nodes'), ]); if (!typesRes.ok) { diff --git a/ui/src/stores/streamStore.test.ts b/ui/src/stores/streamStore.test.ts index 0a0d0577..94f4193e 100644 --- a/ui/src/stores/streamStore.test.ts +++ b/ui/src/stores/streamStore.test.ts @@ -61,6 +61,7 @@ describe('streamStore', () => { status: 'disconnected', connectionMode: 'session', serverUrl: '', + moqToken: '', inputBroadcast: 'input', outputBroadcast: 'output', enablePublish: true, @@ -134,6 +135,13 @@ describe('streamStore', () => { expect(useStreamStore.getState().serverUrl).toBe('http://example.com:8080/moq'); }); + it('should set MoQ token', () => { + const { setMoqToken } = useStreamStore.getState(); + setMoqToken('jwt-token-123'); + + expect(useStreamStore.getState().moqToken).toBe('jwt-token-123'); + }); + it('should set input broadcast', () => { const { setInputBroadcast } = useStreamStore.getState(); setInputBroadcast('custom-input'); diff --git a/ui/src/stores/streamStore.ts b/ui/src/stores/streamStore.ts index 71ecc66f..28a5dd25 100644 --- a/ui/src/stores/streamStore.ts +++ b/ui/src/stores/streamStore.ts @@ -245,6 +245,7 @@ interface StreamState { status: ConnectionStatus; connectionMode: ConnectionMode; serverUrl: string; + moqToken: string; inputBroadcast: string; outputBroadcast: string; @@ -278,6 +279,7 @@ interface StreamState { // Actions setServerUrl: (url: string) => void; + setMoqToken: (token: string) => void; setInputBroadcast: (broadcast: string) => void; setOutputBroadcast: (broadcast: string) => void; setStatus: (status: ConnectionStatus) => void; @@ -311,6 +313,7 @@ export const useStreamStore = create((set, get) => ({ status: 'disconnected', connectionMode: 'session', serverUrl: '', + moqToken: '', 
inputBroadcast: 'input', outputBroadcast: 'output', enablePublish: true, @@ -336,6 +339,7 @@ export const useStreamStore = create((set, get) => ({ // Simple setters setServerUrl: (url) => set({ serverUrl: url }), + setMoqToken: (token) => set({ moqToken: token }), setInputBroadcast: (broadcast) => set({ inputBroadcast: broadcast }), setOutputBroadcast: (broadcast) => set({ outputBroadcast: broadcast }), setStatus: (status) => set({ status }), @@ -417,10 +421,15 @@ export const useStreamStore = create((set, get) => ({ try { logger.info('Step 1: Creating connection to relay server'); + const url = new URL(decision.trimmedServerUrl); + const jwt = get().moqToken.trim(); + if (jwt) { + url.searchParams.set('jwt', jwt); + } // Create connection to relay server with auto-reconnect // Hang will automatically fetch certificate fingerprints from http://host:port/certificate.sha256 attempt.connection = new Hang.Moq.Connection.Reload({ - url: new URL(decision.trimmedServerUrl), + url, enabled: true, }); diff --git a/ui/src/views/LoginView.tsx b/ui/src/views/LoginView.tsx new file mode 100644 index 00000000..8e0a16ab --- /dev/null +++ b/ui/src/views/LoginView.tsx @@ -0,0 +1,245 @@ +// SPDX-FileCopyrightText: © 2025 StreamKit Contributors +// +// SPDX-License-Identifier: MPL-2.0 + +import styled from '@emotion/styled'; +import React, { useEffect, useState } from 'react'; +import { useNavigate } from 'react-router-dom'; + +import { Button } from '@/components/ui/Button'; +import { fetchAuthMe, loginWithToken } from '@/services/auth'; +import { initializePermissions } from '@/services/permissions'; +import { ensureSchemasLoaded } from '@/stores/schemaStore'; +import { getLogger } from '@/utils/logger'; + +const logger = getLogger('LoginView'); + +const Container = styled.div` + box-sizing: border-box; + height: 100%; + width: 100%; + min-width: 0; + overflow: auto; + display: flex; + justify-content: center; + align-items: flex-start; + padding: 40px 24px; +`; + +const Card = 
styled.div` + box-sizing: border-box; + width: 100%; + max-width: 640px; + background: var(--sk-panel-bg); + border: 1px solid var(--sk-border); + border-radius: 12px; + padding: 24px; + display: flex; + flex-direction: column; + gap: 16px; +`; + +const Title = styled.h1` + margin: 0; + font-size: 20px; + font-weight: 700; + color: var(--sk-text); +`; + +const HelpText = styled.p` + margin: 0; + color: var(--sk-text-muted); + line-height: 1.5; + font-size: 14px; +`; + +const TipBox = styled.div` + padding: 12px; + border-radius: 10px; + border: 1px solid var(--sk-border); + background: color-mix(in srgb, var(--sk-primary) 8%, transparent); + display: flex; + gap: 10px; + align-items: flex-start; +`; + +const TipIcon = styled.div` + flex: 0 0 auto; + font-size: 16px; + line-height: 1; + margin-top: 1px; +`; + +const TipContent = styled.div` + display: flex; + flex-direction: column; + gap: 8px; + min-width: 0; +`; + +const TipText = styled.div` + color: var(--sk-text-muted); + line-height: 1.45; + font-size: 13px; +`; + +const CommandBlock = styled.code` + display: block; + padding: 8px 10px; + border-radius: 8px; + border: 1px solid var(--sk-border); + background: var(--sk-panel-bg); + color: var(--sk-text); + font-family: var(--sk-font-code); + font-size: 12px; + line-height: 1.4; + white-space: pre-wrap; + overflow-wrap: anywhere; +`; + +const Label = styled.label` + font-size: 13px; + color: var(--sk-text-muted); + display: flex; + flex-direction: column; + gap: 8px; +`; + +const TextArea = styled.textarea` + box-sizing: border-box; + width: 100%; + min-height: 120px; + padding: 12px; + border-radius: 10px; + border: 1px solid var(--sk-border); + background: var(--sk-bg); + color: var(--sk-text); + resize: vertical; + font-family: + ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, 'Liberation Mono', 'Courier New', + monospace; + font-size: 12px; + line-height: 1.5; +`; + +const ErrorBox = styled.div` + padding: 12px; + border-radius: 10px; + border: 1px 
solid var(--sk-border); + background: color-mix(in srgb, var(--sk-danger) 10%, transparent); + color: var(--sk-text); + font-size: 13px; +`; + +const Actions = styled.div` + display: flex; + gap: 12px; + align-items: center; + flex-wrap: wrap; +`; + +export interface LoginViewProps { + onLoggedIn?: () => void; +} + +const LoginView: React.FC = ({ onLoggedIn }) => { + const navigate = useNavigate(); + const [token, setToken] = useState(''); + const [isSubmitting, setIsSubmitting] = useState(false); + const [error, setError] = useState(null); + const [authEnabled, setAuthEnabled] = useState(null); + + useEffect(() => { + fetchAuthMe() + .then((me) => { + setAuthEnabled(me.auth_enabled); + if (me.auth_enabled && me.authenticated) { + navigate('/design', { replace: true }); + } + }) + .catch((e) => { + logger.error('Failed to check auth status:', e); + setAuthEnabled(null); + }); + }, [navigate]); + + const onLogin = async () => { + setError(null); + const trimmed = token.trim(); + if (!trimmed) { + setError('Paste a token to continue.'); + return; + } + + setIsSubmitting(true); + try { + await loginWithToken(trimmed); + await Promise.all([initializePermissions(), ensureSchemasLoaded()]); + onLoggedIn?.(); + navigate('/design', { replace: true }); + } catch (e) { + const message = e instanceof Error ? e.message : 'Login failed'; + setError(message); + } finally { + setIsSubmitting(false); + } + }; + + return ( + + + Sign in to StreamKit + + Paste an admin (or user) token to access this instance. StreamKit stores it in an HttpOnly + cookie, so your browser can authenticate without query params. + + {authEnabled === false && ( + + Authentication is disabled on this server. You can go straight to the app. + + )} + +