diff --git a/crates/edda-serve/src/api/analytics.rs b/crates/edda-serve/src/api/analytics.rs new file mode 100644 index 0000000..10e2f62 --- /dev/null +++ b/crates/edda-serve/src/api/analytics.rs @@ -0,0 +1,322 @@ +use std::sync::Arc; + +use axum::extract::{Query, State}; +use axum::routing::get; +use axum::{Json, Router}; +use serde::{Deserialize, Serialize}; + +use edda_aggregate::aggregate::{aggregate_decisions, DateRange}; +use edda_aggregate::risk::{compute_decision_risks, DecisionInput}; +use edda_ledger::Ledger; +use edda_store::registry::list_projects; + +use crate::error::AppError; +use crate::state::AppState; + +use super::dashboard::compute_attention; + +// ── GET /api/recap ── + +#[derive(Deserialize)] +struct RecapQuery { + project: Option, + query: Option, + #[serde(rename = "since")] + _since: Option, + week: Option, + scope: Option, +} + +#[derive(Serialize)] +struct RecapAnchor { + #[serde(rename = "type")] + anchor_type: String, + value: String, +} + +#[derive(Serialize)] +struct NeedsYouItem { + severity: String, + summary: String, + action: String, +} + +#[derive(Serialize)] +struct DecisionItem { + key: String, + value: String, + reason: String, +} + +#[derive(Serialize)] +struct RelatedItem { + summary: String, + relevance: String, +} + +#[derive(Serialize)] +struct RecapLayers { + net_result: String, + needs_you: Vec, + decisions: Vec, + related: Vec, +} + +#[derive(Serialize)] +struct RecapMeta { + sessions_analyzed: usize, + llm_used: bool, + cached: bool, + #[serde(skip_serializing_if = "Option::is_none")] + cost_usd: Option, + #[serde(skip_serializing_if = "Option::is_none")] + fallback: Option, +} + +#[derive(Serialize)] +struct RecapResponse { + anchor: RecapAnchor, + layers: RecapLayers, + meta: RecapMeta, +} + +async fn get_recap( + State(state): State>, + Query(params): Query, +) -> Result, AppError> { + if state.chronicle.is_none() { + return Err(anyhow::anyhow!("chronicle feature not enabled").into()); + } + + let anchor = if let 
Some(ref project) = params.project { + RecapAnchor { + anchor_type: "project".to_string(), + value: project.clone(), + } + } else if let Some(ref query) = params.query { + RecapAnchor { + anchor_type: "query".to_string(), + value: query.clone(), + } + } else if params.week.unwrap_or(false) { + RecapAnchor { + anchor_type: "time".to_string(), + value: "week".to_string(), + } + } else if params.scope.as_deref() == Some("all") { + RecapAnchor { + anchor_type: "scope".to_string(), + value: "all".to_string(), + } + } else { + RecapAnchor { + anchor_type: "default".to_string(), + value: "current".to_string(), + } + }; + + // TODO: Replace with actual edda-chronicle integration when #173 is complete + // For now, return a stub response + let response = RecapResponse { + anchor, + layers: RecapLayers { + net_result: "Recap engine not yet integrated (depends on #173)".to_string(), + needs_you: vec![], + decisions: vec![], + related: vec![], + }, + meta: RecapMeta { + sessions_analyzed: 0, + llm_used: false, + cached: false, + cost_usd: None, + fallback: Some("stub".to_string()), + }, + }; + + Ok(Json(response)) +} + +// ── GET /api/recap/cached ── + +#[derive(Deserialize)] +struct RecapCachedQuery { + project: Option, +} + +async fn get_recap_cached( + State(state): State>, + Query(params): Query, +) -> Result, AppError> { + if state.chronicle.is_none() { + return Err(anyhow::anyhow!("chronicle feature not enabled").into()); + } + + let anchor = if let Some(ref project) = params.project { + RecapAnchor { + anchor_type: "project".to_string(), + value: project.clone(), + } + } else { + RecapAnchor { + anchor_type: "default".to_string(), + value: "current".to_string(), + } + }; + + // TODO: Replace with actual cache lookup when #176 is complete + // For now, return a 404-like response + let response = RecapResponse { + anchor, + layers: RecapLayers { + net_result: "No cached recap available".to_string(), + needs_you: vec![], + decisions: vec![], + related: vec![], + }, + meta: 
RecapMeta { + sessions_analyzed: 0, + llm_used: false, + cached: true, + cost_usd: None, + fallback: Some("cache_miss".to_string()), + }, + }; + + Ok(Json(response)) +} + +// ── GET /api/overview ── + +#[derive(Serialize)] +pub(crate) struct OverviewRedItem { + pub(crate) project: String, + pub(crate) summary: String, + pub(crate) action: String, + pub(crate) blocked_count: usize, +} + +#[derive(Serialize)] +pub(crate) struct OverviewYellowItem { + pub(crate) project: String, + pub(crate) summary: String, + pub(crate) eta: String, +} + +#[derive(Serialize)] +pub(crate) struct OverviewGreenItem { + pub(crate) project: String, + pub(crate) summary: String, +} + +#[derive(Serialize)] +pub(crate) struct OverviewResponse { + pub(crate) red: Vec, + pub(crate) yellow: Vec, + pub(crate) green: Vec, + pub(crate) updated_at: String, +} + +async fn get_overview( + State(state): State>, +) -> Result, AppError> { + if state.chronicle.is_none() { + return Err(anyhow::anyhow!("chronicle feature not enabled").into()); + } + + let projects = list_projects(); + let range = DateRange { + after: Some({ + let now = time::OffsetDateTime::now_utc(); + let from = now - time::Duration::days(7); + from.format(&time::format_description::well_known::Rfc3339) + .unwrap_or_default()[..10] + .to_string() + }), + before: None, + }; + + // Compute decisions + risks for attention routing + let decisions = aggregate_decisions(&projects); + let now_iso = time::OffsetDateTime::now_utc() + .format(&time::format_description::well_known::Rfc3339) + .unwrap_or_default(); + let decision_inputs: Vec = decisions + .iter() + .map(|d| DecisionInput { + event_id: d.event_id.clone(), + key: d.key.clone(), + value: d.value.clone(), + project: d.project_name.clone(), + ts: d.ts.clone(), + }) + .collect(); + + // TODO: This event-loading block is duplicated in get_dashboard; extract into a shared helper in a follow-up. 
+ let mut all_events = Vec::new(); + for entry in &projects { + let root = std::path::Path::new(&entry.path); + if let Ok(ledger) = Ledger::open(root) { + if let Ok(events) = ledger.iter_events() { + all_events.extend(events); + } + } + } + + let risks = compute_decision_risks( + &decision_inputs, + &all_events, + &now_iso, + &std::collections::HashSet::new(), + ); + + let response = compute_attention(&risks, &projects, &range, &[], 7); + Ok(Json(response)) +} + +// ── GET /api/projects ── + +#[derive(Serialize)] +struct ProjectStatus { + name: String, + id: String, + last_activity: String, + status: String, +} + +#[derive(Serialize)] +struct ProjectsResponse { + projects: Vec, +} + +async fn get_projects( + State(state): State>, +) -> Result, AppError> { + if state.chronicle.is_none() { + return Err(anyhow::anyhow!("chronicle feature not enabled").into()); + } + + let projects = list_projects(); + let project_statuses: Vec = projects + .into_iter() + .map(|p| ProjectStatus { + name: p.name, + id: p.project_id, + last_activity: p.last_seen, + status: "unknown".to_string(), // TODO: Calculate from overview + }) + .collect(); + + Ok(Json(ProjectsResponse { + projects: project_statuses, + })) +} + +/// Analytics routes (recap, overview, projects). 
+pub(crate) fn routes() -> Router> { + Router::new() + .route("/api/recap", get(get_recap)) + .route("/api/recap/cached", get(get_recap_cached)) + .route("/api/overview", get(get_overview)) + .route("/api/projects", get(get_projects)) +} diff --git a/crates/edda-serve/src/api/auth.rs b/crates/edda-serve/src/api/auth.rs new file mode 100644 index 0000000..8e7849f --- /dev/null +++ b/crates/edda-serve/src/api/auth.rs @@ -0,0 +1,337 @@ +use std::sync::Arc; +use std::time::Duration; + +use anyhow::Context; +use axum::extract::rejection::JsonRejection; +use axum::extract::{Query, State}; +use axum::http::HeaderMap; +use axum::routing::{get, post}; +use axum::{Json, Router}; +use serde::{Deserialize, Serialize}; + +use crate::error::AppError; +use crate::middleware::generate_pairing_token; +use crate::state::{AppState, PairingRequest}; + +// ── Pairing Endpoints ── + +#[derive(Deserialize)] +struct CreatePairingRequest { + device_name: String, +} + +#[derive(Serialize)] +struct CreatePairingResponse { + pairing_url: String, + pairing_token: String, + expires_in_seconds: u64, +} + +/// POST /api/pair/new — Create a pairing request (generates one-time pairing token). 
+async fn create_pairing( + State(state): State>, + headers: HeaderMap, + body: Result, JsonRejection>, +) -> Result, AppError> { + let Json(req) = body.map_err(|e| AppError::Validation(e.to_string()))?; + + if req.device_name.is_empty() { + return Err(AppError::Validation("device_name is required".to_string())); + } + + let pairing_token = generate_pairing_token(); + let ttl = Duration::from_secs(600); // 10 minutes + + { + let mut pairings = state + .pending_pairings + .lock() + .map_err(|e| AppError::Internal(anyhow::anyhow!("lock poisoned: {e}")))?; + + // Clean up expired pairings + let now = std::time::Instant::now(); + pairings.retain(|_, v| v.expires_at > now); + + pairings.insert( + pairing_token.clone(), + PairingRequest { + device_name: req.device_name, + expires_at: now + ttl, + }, + ); + } + + // Determine host from request headers for URL construction + let host = headers + .get("host") + .and_then(|v| v.to_str().ok()) + .unwrap_or("localhost:7433"); + + let pairing_url = format!("http://{host}/pair?token={pairing_token}"); + + Ok(Json(CreatePairingResponse { + pairing_url, + pairing_token, + expires_in_seconds: 600, + })) +} + +#[derive(Deserialize)] +struct CompletePairingQuery { + token: String, +} + +#[derive(Serialize)] +struct CompletePairingResponse { + device_token: String, + device_name: String, +} + +/// GET /pair?token= — Complete pairing (the URL the device visits). 
+async fn complete_pairing( + State(state): State>, + headers: HeaderMap, + Query(query): Query, +) -> Result, AppError> { + // Extract and validate the pairing token + let pairing_req = { + let mut pairings = state + .pending_pairings + .lock() + .map_err(|e| AppError::Internal(anyhow::anyhow!("lock poisoned: {e}")))?; + + let now = std::time::Instant::now(); + pairings.retain(|_, v| v.expires_at > now); + + pairings.remove(&query.token) + }; + + let pairing_req = pairing_req + .ok_or_else(|| AppError::Validation("invalid or expired pairing token".to_string()))?; + + // Generate the long-lived device token + let device_token = edda_ledger::device_token::generate_device_token(); + let token_hash = edda_ledger::device_token::hash_token(&device_token); + + let now = time::OffsetDateTime::now_utc(); + let paired_at = now + .format(&time::format_description::well_known::Rfc3339) + .map_err(|e| AppError::Internal(anyhow::anyhow!("time format error: {e}")))?; + + let from_ip = headers + .get("x-forwarded-for") + .and_then(|v| v.to_str().ok()) + .unwrap_or("unknown") + .to_string(); + let event_id = format!("evt_{}", ulid::Ulid::new()); + + // Write device_pair event to ledger + let ledger = state.open_ledger().context("GET /pair")?; + let branch = ledger.head_branch()?; + + let payload = serde_json::json!({ + "device_name": pairing_req.device_name, + "paired_from_ip": from_ip, + "token_hash_prefix": &token_hash[..8], + }); + + let parent_hash = ledger.last_event_hash()?; + let mut event = edda_core::types::Event { + event_id: event_id.clone(), + ts: paired_at.clone(), + event_type: "device_pair".to_string(), + branch: branch.clone(), + parent_hash, + hash: String::new(), + payload, + refs: Default::default(), + schema_version: edda_core::types::SCHEMA_VERSION, + digests: vec![], + event_family: Some(edda_core::types::event_family::ADMIN.to_string()), + event_level: Some(edda_core::types::event_level::INFO.to_string()), + }; + + edda_core::event::finalize_event(&mut 
event)?; + ledger.append_event(&event)?; + + // Insert into device_tokens table + ledger.insert_device_token(&edda_ledger::DeviceTokenRow { + token_hash, + device_name: pairing_req.device_name.clone(), + paired_at, + paired_from_ip: from_ip, + revoked_at: None, + pair_event_id: event_id, + revoke_event_id: None, + })?; + + Ok(Json(CompletePairingResponse { + device_token, + device_name: pairing_req.device_name, + })) +} + +#[derive(Serialize)] +struct DeviceInfo { + device_name: String, + paired_at: String, + status: String, + revoked_at: Option, +} + +/// GET /api/pair/list — List all paired devices. +async fn list_paired_devices( + State(state): State>, +) -> Result>, AppError> { + let ledger = state.open_ledger().context("GET /api/pair/list")?; + let tokens = ledger.list_device_tokens()?; + + let devices: Vec = tokens + .into_iter() + .map(|t| DeviceInfo { + device_name: t.device_name, + paired_at: t.paired_at, + status: if t.revoked_at.is_some() { + "revoked".to_string() + } else { + "active".to_string() + }, + revoked_at: t.revoked_at, + }) + .collect(); + + Ok(Json(devices)) +} + +#[derive(Deserialize)] +struct RevokeDeviceRequest { + device_name: String, +} + +/// POST /api/pair/revoke — Revoke a specific device. 
+async fn revoke_device( + State(state): State>, + body: Result, JsonRejection>, +) -> Result, AppError> { + let Json(req) = body.map_err(|e| AppError::Validation(e.to_string()))?; + + let ledger = state.open_ledger().context("POST /api/pair/revoke")?; + + // Check the token exists *before* writing the ledger event + let existing = ledger.list_device_tokens()?; + let has_active = existing + .iter() + .any(|t| t.device_name == req.device_name && t.revoked_at.is_none()); + if !has_active { + return Err(AppError::NotFound(format!( + "no active device token found for '{}'", + req.device_name + ))); + } + + let event_id = format!("evt_{}", ulid::Ulid::new()); + let branch = ledger.head_branch()?; + + let now = time::OffsetDateTime::now_utc(); + let ts = now + .format(&time::format_description::well_known::Rfc3339) + .map_err(|e| AppError::Internal(anyhow::anyhow!("time format error: {e}")))?; + + let payload = serde_json::json!({ + "device_name": req.device_name, + }); + + let parent_hash = ledger.last_event_hash()?; + let mut event = edda_core::types::Event { + event_id: event_id.clone(), + ts, + event_type: "device_revoke".to_string(), + branch: branch.clone(), + parent_hash, + hash: String::new(), + payload, + refs: Default::default(), + schema_version: edda_core::types::SCHEMA_VERSION, + digests: vec![], + event_family: Some(edda_core::types::event_family::ADMIN.to_string()), + event_level: Some(edda_core::types::event_level::INFO.to_string()), + }; + + edda_core::event::finalize_event(&mut event)?; + ledger.append_event(&event)?; + ledger.revoke_device_token(&req.device_name, &event_id)?; + + Ok(Json(serde_json::json!({ + "ok": true, + "device_name": req.device_name, + "event_id": event_id, + }))) +} + +/// POST /api/pair/revoke-all — Revoke all active device tokens. 
+async fn revoke_all_devices( + State(state): State>, +) -> Result, AppError> { + let event_id = format!("evt_{}", ulid::Ulid::new()); + let ledger = state.open_ledger().context("POST /api/pair/revoke-all")?; + let branch = ledger.head_branch()?; + + let now = time::OffsetDateTime::now_utc(); + let ts = now + .format(&time::format_description::well_known::Rfc3339) + .map_err(|e| AppError::Internal(anyhow::anyhow!("time format error: {e}")))?; + + let payload = serde_json::json!({ "revoke_all": true }); + + let parent_hash = ledger.last_event_hash()?; + let mut event = edda_core::types::Event { + event_id: event_id.clone(), + ts, + event_type: "device_revoke".to_string(), + branch: branch.clone(), + parent_hash, + hash: String::new(), + payload, + refs: Default::default(), + schema_version: edda_core::types::SCHEMA_VERSION, + digests: vec![], + event_family: Some(edda_core::types::event_family::ADMIN.to_string()), + event_level: Some(edda_core::types::event_level::INFO.to_string()), + }; + + edda_core::event::finalize_event(&mut event)?; + ledger.append_event(&event)?; + + let count = ledger.revoke_all_device_tokens(&event_id)?; + + Ok(Json(serde_json::json!({ + "ok": true, + "revoked_count": count, + "event_id": event_id, + }))) +} + +/// Public auth routes (no auth required). +pub(crate) fn public_routes() -> Router> { + Router::new().route("/pair", get(complete_pairing)) +} + +/// Protected auth routes (auth middleware applied). +pub(crate) fn protected_routes() -> Router> { + Router::new() + .route("/api/pair/new", post(create_pairing)) + .route("/api/pair/list", get(list_paired_devices)) + .route("/api/pair/revoke", post(revoke_device)) + .route("/api/pair/revoke-all", post(revoke_all_devices)) +} + +/// All auth routes (for test router without auth middleware). 
+#[cfg(test)] +pub(crate) fn routes() -> Router> { + Router::new() + .route("/pair", get(complete_pairing)) + .route("/api/pair/new", post(create_pairing)) + .route("/api/pair/list", get(list_paired_devices)) + .route("/api/pair/revoke", post(revoke_device)) + .route("/api/pair/revoke-all", post(revoke_all_devices)) +} diff --git a/crates/edda-serve/src/api/briefs.rs b/crates/edda-serve/src/api/briefs.rs new file mode 100644 index 0000000..47ca9f0 --- /dev/null +++ b/crates/edda-serve/src/api/briefs.rs @@ -0,0 +1,158 @@ +use std::sync::Arc; + +use anyhow::Context; +use axum::extract::{Path as AxumPath, Query, State}; +use axum::response::IntoResponse; +use axum::routing::get; +use axum::{Json, Router}; +use serde::{Deserialize, Serialize}; + +use edda_core::policy::{self, ActorKind}; + +use crate::error::AppError; +use crate::state::AppState; + +// ── GET /api/actors ── + +#[derive(Serialize)] +struct ActorResponse { + name: String, + kind: ActorKind, + roles: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + email: Option, + #[serde(skip_serializing_if = "Option::is_none")] + display_name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + runtime: Option, +} + +#[derive(Serialize)] +struct ActorsListResponse { + actors: Vec, +} + +async fn get_actors( + State(state): State>, +) -> Result, AppError> { + let ledger = state.open_ledger().context("GET /api/actors")?; + let cfg = policy::load_actors_from_dir(&ledger.paths.edda_dir)?; + let actors = cfg + .actors + .into_iter() + .map(|(name, def)| ActorResponse { + name, + kind: def.kind, + roles: def.roles, + email: def.email, + display_name: def.display_name, + runtime: def.runtime, + }) + .collect(); + Ok(Json(ActorsListResponse { actors })) +} + +// ── GET /api/actors/:name ── + +async fn get_actor( + State(state): State>, + AxumPath(name): AxumPath, +) -> Result, AppError> { + let ledger = state.open_ledger().context("GET /api/actors/:name")?; + let cfg = 
policy::load_actors_from_dir(&ledger.paths.edda_dir)?; + match cfg.actors.get(&name) { + Some(def) => Ok(Json(ActorResponse { + name, + kind: def.kind.clone(), + roles: def.roles.clone(), + email: def.email.clone(), + display_name: def.display_name.clone(), + runtime: def.runtime.clone(), + })), + None => Err(AppError::NotFound(format!("Actor '{name}' not found"))), + } +} +// ── GET /dashboard (HTML) ── + +async fn serve_dashboard() -> impl IntoResponse { + axum::response::Html(include_str!("../../static/dashboard.html")) +} + +// ── GET /api/briefs ── + +#[derive(Deserialize)] +struct BriefsQuery { + status: Option, + intent: Option, +} + +async fn get_briefs( + State(state): State>, + Query(params): Query, +) -> Result, AppError> { + let ledger = state.open_ledger().context("GET /api/briefs")?; + let briefs = ledger.list_task_briefs(params.status.as_deref(), params.intent.as_deref())?; + + let items: Vec = briefs + .iter() + .map(|b| { + serde_json::json!({ + "task_id": b.task_id, + "intake_event_id": b.intake_event_id, + "title": b.title, + "intent": b.intent.as_str(), + "source_url": b.source_url, + "status": b.status.as_str(), + "branch": b.branch, + "iterations": b.iterations, + "artifacts": serde_json::from_str::(&b.artifacts).unwrap_or_default(), + "decisions": serde_json::from_str::(&b.decisions).unwrap_or_default(), + "last_feedback": b.last_feedback, + "created_at": b.created_at, + "updated_at": b.updated_at, + }) + }) + .collect(); + + Ok(Json( + serde_json::json!({ "briefs": items, "count": items.len() }), + )) +} + +// ── GET /api/briefs/:task_id ── + +async fn get_brief( + State(state): State>, + AxumPath(task_id): AxumPath, +) -> Result, AppError> { + let ledger = state.open_ledger().context("GET /api/briefs/:task_id")?; + let brief = ledger + .get_task_brief(&task_id)? 
+ .ok_or_else(|| AppError::NotFound(format!("task brief not found: {task_id}")))?; + + Ok(Json(serde_json::json!({ + "task_id": brief.task_id, + "intake_event_id": brief.intake_event_id, + "title": brief.title, + "intent": brief.intent.as_str(), + "source_url": brief.source_url, + "status": brief.status.as_str(), + "branch": brief.branch, + "iterations": brief.iterations, + "artifacts": serde_json::from_str::(&brief.artifacts).unwrap_or_default(), + "decisions": serde_json::from_str::(&brief.decisions).unwrap_or_default(), + "last_feedback": brief.last_feedback, + "created_at": brief.created_at, + "updated_at": brief.updated_at, + }))) +} + +/// Briefs, actors, and dashboard HTML routes. +pub(crate) fn routes() -> Router> { + Router::new() + .route("/api/actors", get(get_actors)) + .route("/api/actors/{name}", get(get_actor)) + .route("/api/briefs", get(get_briefs)) + .route("/api/briefs/{task_id}", get(get_brief)) + .route("/dashboard", get(serve_dashboard)) +} diff --git a/crates/edda-serve/src/api/dashboard.rs b/crates/edda-serve/src/api/dashboard.rs new file mode 100644 index 0000000..bd28c4d --- /dev/null +++ b/crates/edda-serve/src/api/dashboard.rs @@ -0,0 +1,354 @@ +use std::sync::Arc; + +use axum::extract::{Query, State}; +use axum::routing::get; +use axum::{Json, Router}; +use serde::{Deserialize, Serialize}; + +use edda_aggregate::aggregate::{ + aggregate_decisions, aggregate_overview, per_project_metrics, DateRange, ProjectMetrics, +}; +use edda_aggregate::graph::build_dependency_graph; +use edda_aggregate::risk::{compute_decision_risks, DecisionInput, DecisionRisk}; +use edda_ledger::Ledger; +use edda_store::registry::list_projects; + +use crate::error::AppError; +use crate::state::AppState; + +use super::analytics::{OverviewGreenItem, OverviewRedItem, OverviewResponse, OverviewYellowItem}; + +// ── GET /api/dashboard ── + +#[derive(Deserialize)] +struct DashboardQuery { + #[serde(default = "default_days")] + days: usize, +} + +fn default_days() -> 
usize { + 7 +} + +#[derive(Serialize)] +struct DashboardResponse { + period: DashboardPeriod, + summary: DashboardSummary, + attention: OverviewResponse, + timeline: Vec, + graph: edda_aggregate::graph::DependencyGraph, + risks: Vec, + project_metrics: Vec, +} + +#[derive(Serialize)] +pub(crate) struct DashboardPeriod { + pub(crate) from: String, + pub(crate) to: String, + pub(crate) days: usize, +} + +#[derive(Serialize)] +struct DashboardSummary { + total_projects: usize, + total_decisions: usize, + total_events: usize, + total_commits: usize, + total_cost_usd: f64, + overall_success_rate: f64, +} + +#[derive(Serialize)] +struct TimelineEntry { + ts: String, + event_type: String, + key: String, + value: String, + reason: String, + project: String, + risk_level: String, + supersedes: Option, +} + +async fn get_dashboard( + State(_state): State>, + Query(params): Query, +) -> Result, AppError> { + let projects = list_projects(); + + let now = time::OffsetDateTime::now_utc(); + let from_date = now - time::Duration::days(params.days as i64); + let to_str = now + .format(&time::format_description::well_known::Rfc3339) + .unwrap_or_default(); + let from_str = from_date + .format(&time::format_description::well_known::Rfc3339) + .unwrap_or_default(); + + let range = DateRange { + after: Some(from_str[..10].to_string()), + before: None, + }; + + // Summary + let agg = aggregate_overview(&projects, &range); + + // Decisions + risk scoring + let decisions = aggregate_decisions(&projects); + let now_iso = &to_str; + + let decision_inputs: Vec = decisions + .iter() + .map(|d| DecisionInput { + event_id: d.event_id.clone(), + key: d.key.clone(), + value: d.value.clone(), + project: d.project_name.clone(), + ts: d.ts.clone(), + }) + .collect(); + + // Collect all events for risk computation + // TODO: This event-loading block is duplicated in get_overview; extract into a shared helper in a follow-up. 
+ let mut all_events = Vec::new(); + for entry in &projects { + let root = std::path::Path::new(&entry.path); + if let Ok(ledger) = Ledger::open(root) { + if let Ok(events) = ledger.iter_events() { + all_events.extend(events); + } + } + } + + // Cross-project: decision IDs that appear in provenance of events from OTHER projects + let mut cross_project_ids = std::collections::HashSet::new(); + for entry in &projects { + let root = std::path::Path::new(&entry.path); + if let Ok(ledger) = Ledger::open(root) { + if let Ok(events) = ledger.iter_events() { + for event in &events { + for prov in &event.refs.provenance { + // If this event references a decision from another project + for d in &decisions { + if d.event_id == prov.target && d.project_name != entry.name { + cross_project_ids.insert(d.event_id.clone()); + } + } + } + } + } + } + } + + let risks = compute_decision_risks(&decision_inputs, &all_events, now_iso, &cross_project_ids); + + // Build risk lookup for timeline entries + let risk_map: std::collections::HashMap<&str, &str> = risks + .iter() + .map(|r| (r.event_id.as_str(), r.risk_level.as_str())) + .collect(); + + // Timeline: decisions sorted by timestamp descending + let mut timeline: Vec = decisions + .iter() + .map(|d| { + let risk_level = risk_map + .get(d.event_id.as_str()) + .unwrap_or(&"low") + .to_string(); + TimelineEntry { + ts: d.ts.clone().unwrap_or_default(), + event_type: "decision".to_string(), + key: d.key.clone(), + value: d.value.clone(), + reason: d.reason.clone(), + project: d.project_name.clone(), + risk_level, + supersedes: None, // Would need provenance walk + } + }) + .collect(); + timeline.sort_by(|a, b| b.ts.cmp(&a.ts)); + + // Dependency graph + let graph = build_dependency_graph(&projects); + + // Per-project metrics + let project_metrics = per_project_metrics(&projects, &range, params.days); + + // Compute cost totals for summary + let total_cost: f64 = project_metrics.iter().map(|m| m.cost.total_usd).sum(); + let total_steps: 
u64 = project_metrics.iter().map(|m| m.quality.total_steps).sum(); + let total_success: u64 = project_metrics + .iter() + .map(|m| (m.quality.success_rate * m.quality.total_steps as f64) as u64) + .sum(); + let overall_success_rate = if total_steps > 0 { + total_success as f64 / total_steps as f64 + } else { + 0.0 + }; + + // Attention routing (with cost anomaly detection) + let attention = compute_attention(&risks, &projects, &range, &project_metrics, params.days); + + let period = DashboardPeriod { + from: from_str[..10].to_string(), + to: to_str[..10].to_string(), + days: params.days, + }; + + let summary = DashboardSummary { + total_projects: agg.projects.len(), + total_decisions: agg.total_decisions, + total_events: agg.total_events, + total_commits: agg.total_commits, + total_cost_usd: total_cost, + overall_success_rate, + }; + + Ok(Json(DashboardResponse { + period, + summary, + attention, + timeline, + graph, + risks, + project_metrics, + })) +} + +/// Compute attention routing: red / yellow / green classification. 
+/// +/// Includes cost anomaly detection when `project_metrics` is non-empty: +/// - Yellow: project daily cost > 2x period average +/// - Red: project daily cost > 5x period average +pub(crate) fn compute_attention( + risks: &[DecisionRisk], + projects: &[edda_store::registry::ProjectEntry], + range: &DateRange, + project_metrics: &[ProjectMetrics], + days: usize, +) -> OverviewResponse { + let mut red = Vec::new(); + let mut yellow = Vec::new(); + let mut green = Vec::new(); + + let now = time::OffsetDateTime::now_utc(); + let updated_at = now + .format(&time::format_description::well_known::Rfc3339) + .unwrap_or_else(|_| "unknown".to_string()); + + // Red: high-risk decisions + for r in risks { + if r.risk_level == "high" { + red.push(OverviewRedItem { + project: r.project.clone(), + summary: format!( + "{} = {} (risk {:.0}%)", + r.key, + r.value, + r.risk_score * 100.0 + ), + action: "Review before overriding".to_string(), + blocked_count: 0, + }); + } + } + + // Yellow: medium-risk decisions + for r in risks { + if r.risk_level == "medium" { + yellow.push(OverviewYellowItem { + project: r.project.clone(), + summary: format!( + "{} = {} (risk {:.0}%)", + r.key, + r.value, + r.risk_score * 100.0 + ), + eta: String::new(), + }); + } + } + + // Cost anomaly detection + if days > 0 { + for pm in project_metrics { + let daily_avg = pm.cost.daily_avg_usd; + if daily_avg > 0.0 && pm.cost.last_day_usd > 0.0 { + // Use the actual most-recent-day cost from rollup data + let last_day_cost = pm.cost.last_day_usd; + if last_day_cost > daily_avg * 5.0 { + red.push(OverviewRedItem { + project: pm.name.clone(), + summary: format!( + "Cost spike: ${:.2}/day (5x above ${:.2} avg)", + last_day_cost, daily_avg + ), + action: "Investigate cost increase".to_string(), + blocked_count: 0, + }); + } else if last_day_cost > daily_avg * 2.0 { + yellow.push(OverviewYellowItem { + project: pm.name.clone(), + summary: format!( + "Elevated cost: ${:.2}/day (2x above ${:.2} avg)", + 
last_day_cost, daily_avg + ), + eta: String::new(), + }); + } + } + } + } + + // Red: stale projects (no events in range) + for entry in projects { + let root = std::path::Path::new(&entry.path); + let has_events = Ledger::open(root) + .and_then(|l| l.iter_events()) + .map(|events| events.iter().any(|e| range.matches(&e.ts))) + .unwrap_or(false); + if !has_events { + red.push(OverviewRedItem { + project: entry.name.clone(), + summary: "No activity in period".to_string(), + action: "Check project status".to_string(), + blocked_count: 0, + }); + } + } + + // Green: projects with normal activity + for entry in projects { + let root = std::path::Path::new(&entry.path); + let has_events = Ledger::open(root) + .and_then(|l| l.iter_events()) + .map(|events| events.iter().any(|e| range.matches(&e.ts))) + .unwrap_or(false); + if has_events { + let high_risk = risks + .iter() + .any(|r| r.project == entry.name && r.risk_level == "high"); + if !high_risk { + green.push(OverviewGreenItem { + project: entry.name.clone(), + summary: "Normal activity".to_string(), + }); + } + } + } + + OverviewResponse { + red, + yellow, + green, + updated_at, + } +} + +/// Dashboard routes. 
+pub(crate) fn routes() -> Router<Arc<AppState>> {
+    Router::new().route("/api/dashboard", get(get_dashboard))
+}
diff --git a/crates/edda-serve/src/api/drafts.rs b/crates/edda-serve/src/api/drafts.rs
new file mode 100644
index 0000000..05e8137
--- /dev/null
+++ b/crates/edda-serve/src/api/drafts.rs
@@ -0,0 +1,526 @@
+use std::path::Path;
+use std::sync::Arc;
+
+use anyhow::Context;
+use axum::extract::rejection::JsonRejection;
+use axum::extract::{Path as AxumPath, State};
+use axum::http::{HeaderMap, StatusCode};
+use axum::response::{IntoResponse, Response};
+use axum::routing::{get, post};
+use axum::{Json, Router};
+use serde::{Deserialize, Serialize};
+
+use edda_core::agent_phase::{mobile_context_summary, AgentPhaseState};
+use edda_core::event::{new_approval_event, ApprovalEventParams};
+use edda_ledger::lock::WorkspaceLock;
+use edda_ledger::Ledger;
+
+use crate::error::AppError;
+use crate::state::AppState;
+
+// ── GET /api/drafts ──
+
+/// One pending approval stage of a draft, flattened for the drafts list UI.
+#[derive(Serialize)]
+struct DraftItem {
+    draft_id: String,
+    title: String,
+    stage_id: String,
+    role: String,
+    approved: usize,
+    min_approvals: usize,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    risk_level: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    phase: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    agent: Option<String>,
+    // NOTE(review): generic parameter lost in patch mangling. `ps.issue` is
+    // moved out of a shared borrow in get_drafts without a clone, so the type
+    // must be Copy — reconstructed as u64 (issue number); confirm against
+    // edda_core::agent_phase::AgentPhaseState.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    issue: Option<u64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    context_summary: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    requested_at: Option<String>,
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
+    labels: Vec<String>,
+}
+
+#[derive(Serialize)]
+struct DraftsResponse {
+    drafts: Vec<DraftItem>,
+}
+
+/// Minimal projection of a draft JSON file — only the fields this API reads.
+/// Unknown fields are ignored so schema evolution does not break listing.
+#[derive(Deserialize)]
+struct MinimalDraft {
+    #[serde(default)]
+    draft_id: String,
+    #[serde(default)]
+    title: String,
+    #[serde(default)]
+    status: String,
+    #[serde(default)]
+    stages: Vec<MinimalStage>,
+    #[serde(default)]
+    labels: Vec<String>,
+    #[serde(default)]
+    created_at: Option<String>,
+    #[serde(default)]
+    branch: String,
+}
+
+#[derive(Deserialize)]
+struct MinimalStage {
+    #[serde(default)]
+    stage_id: String,
+    #[serde(default)]
+    role: String,
+    #[serde(default)]
+    min_approvals: usize,
+    #[serde(default)]
+    approved_by: Vec<String>,
+    #[serde(default)]
+    status: String,
+}
+
+/// List every pending stage of every non-applied draft, enriched with agent
+/// phase context (phase, agent id, issue, summary) when a matching phase
+/// state exists. Unreadable or non-JSON draft files are skipped silently.
+async fn get_drafts(State(state): State<Arc<AppState>>) -> Result<Json<DraftsResponse>, AppError> {
+    let ledger = state.open_ledger().context("GET /api/drafts")?;
+    let drafts_dir = &ledger.paths.drafts_dir;
+
+    if !drafts_dir.exists() {
+        return Ok(Json(DraftsResponse { drafts: vec![] }));
+    }
+
+    // Load agent phase states for context enrichment
+    let phase_states = load_agent_phase_states(&state.repo_root);
+
+    // Load recent decisions/commits for context summary
+    let head = ledger.head_branch().unwrap_or_default();
+    let recent_decisions = recent_decision_summaries(&ledger, &head, 3);
+    let recent_commits = recent_commit_summaries(&ledger, &head, 3);
+
+    let mut items = Vec::new();
+    for entry in std::fs::read_dir(drafts_dir)? {
+        let entry = entry?;
+        let path = entry.path();
+
+        if path.extension().and_then(|e| e.to_str()) != Some("json") {
+            continue;
+        }
+        if path.file_stem().and_then(|s| s.to_str()) == Some("latest") {
+            continue;
+        }
+
+        let content = std::fs::read_to_string(&path)?;
+        let draft: MinimalDraft = match serde_json::from_str(&content) {
+            Ok(d) => d,
+            Err(_) => continue,
+        };
+
+        if draft.status == "applied" {
+            continue;
+        }
+
+        // Try to find a matching agent phase state (by branch or label)
+        let matched_phase = phase_states.iter().find(|ps| {
+            ps.branch.as_deref() == Some(&draft.branch) || ps.session_id == draft.draft_id
+        });
+
+        let (phase, agent, issue, context_summary) = if let Some(ps) = matched_phase {
+            let summary = mobile_context_summary(ps, &recent_decisions, &recent_commits, 200);
+            (
+                Some(ps.phase.to_string()),
+                Some(ps.session_id.clone()),
+                ps.issue,
+                Some(summary),
+            )
+        } else {
+            (None, None, None, None)
+        };
+
+        // Derive risk_level from labels if present
+        let risk_level = draft
+            .labels
+            .iter()
+            .find(|l| l.starts_with("risk:") || l.contains("risk"))
+            .map(|l| l.strip_prefix("risk:").unwrap_or(l).to_string())
+            .or_else(|| {
+                if draft.labels.iter().any(|l| l == "high-risk") {
+                    Some("high".to_string())
+                } else {
+                    None
+                }
+            });
+
+        for stage in &draft.stages {
+            if stage.status != "pending" {
+                continue;
+            }
+            items.push(DraftItem {
+                draft_id: draft.draft_id.clone(),
+                title: draft.title.clone(),
+                stage_id: stage.stage_id.clone(),
+                role: stage.role.clone(),
+                approved: stage.approved_by.len(),
+                min_approvals: stage.min_approvals,
+                risk_level: risk_level.clone(),
+                phase: phase.clone(),
+                agent: agent.clone(),
+                issue,
+                context_summary: context_summary.clone(),
+                requested_at: draft.created_at.clone(),
+                labels: draft.labels.clone(),
+            });
+        }
+    }
+
+    Ok(Json(DraftsResponse { drafts: items }))
+}
+
+/// Load agent phase state files from `.edda/agent-phases/`.
+/// Best-effort: a missing directory or unreadable/unparsable file yields an
+/// empty/partial list rather than an error.
+fn load_agent_phase_states(repo_root: &Path) -> Vec<AgentPhaseState> {
+    let phases_dir = repo_root.join(".edda").join("agent-phases");
+    if !phases_dir.exists() {
+        return Vec::new();
+    }
+    let entries = match std::fs::read_dir(&phases_dir) {
+        Ok(e) => e,
+        Err(_) => return Vec::new(),
+    };
+    let mut states = Vec::new();
+    for entry in entries.flatten() {
+        let path = entry.path();
+        if path.extension().and_then(|e| e.to_str()) != Some("json") {
+            continue;
+        }
+        if let Ok(content) = std::fs::read_to_string(&path) {
+            if let Ok(ps) = serde_json::from_str::<AgentPhaseState>(&content) {
+                states.push(ps);
+            }
+        }
+    }
+    states
+}
+
+/// Fetch recent decision summaries from the ledger for context generation.
+fn recent_decision_summaries(ledger: &Ledger, branch: &str, limit: usize) -> Vec<String> {
+    let events = ledger
+        .iter_events_filtered(branch, Some("decision"), None, None, None, limit)
+        .unwrap_or_default();
+    events
+        .iter()
+        .filter_map(|e| {
+            // Skip events whose payload lacks string `key`/`value` fields.
+            let key = e.payload.get("key")?.as_str()?;
+            let value = e.payload.get("value")?.as_str()?;
+            Some(format!("{key}={value}"))
+        })
+        .collect()
+}
+
+/// Fetch recent commit summaries from the ledger for context generation.
+fn recent_commit_summaries(ledger: &Ledger, branch: &str, limit: usize) -> Vec<String> {
+    let events = ledger
+        .iter_events_filtered(branch, Some("commit"), None, None, None, limit)
+        .unwrap_or_default();
+    events
+        .iter()
+        .filter_map(|e| {
+            e.payload
+                .get("title")
+                .and_then(|v| v.as_str())
+                .map(|s| s.to_string())
+        })
+        .collect()
+}
+
+// ── POST /api/drafts/:id/approve ──
+
+/// Optional request body for approve/deny; all fields default to None.
+#[derive(Deserialize)]
+struct ApproveRequest {
+    #[serde(default)]
+    reason: Option<String>,
+    #[serde(default)]
+    actor: Option<String>,
+    #[serde(default)]
+    stage: Option<String>,
+}
+
+#[derive(Serialize)]
+struct ApprovalResponse {
+    event_id: String,
+    draft_status: String,
+    stage_status: String,
+}
+
+async fn post_draft_approve(
+    State(state): State<Arc<AppState>>,
+    headers: HeaderMap,
+    AxumPath(draft_id): AxumPath<String>,
+    body: Result<Json<ApproveRequest>, JsonRejection>,
+) -> Result<Response, AppError> {
+    let Json(body) = body.map_err(|e| AppError::Validation(e.body_text()))?;
+    handle_draft_action(&state, &headers, &draft_id, "approve", &body).await
+}
+
+// ── POST /api/drafts/:id/deny ──
+
+async fn post_draft_deny(
+    State(state): State<Arc<AppState>>,
+    headers: HeaderMap,
+    AxumPath(draft_id): AxumPath<String>,
+    body: Result<Json<ApproveRequest>, JsonRejection>,
+) -> Result<Response, AppError> {
+    let Json(body) = body.map_err(|e| AppError::Validation(e.body_text()))?;
+    handle_draft_action(&state, &headers, &draft_id, "deny", &body).await
+}
+
+/// Shared handler for approve/deny actions on drafts.
+async fn handle_draft_action(
+    state: &AppState,
+    headers: &HeaderMap,
+    draft_id: &str,
+    action: &str,
+    body: &ApproveRequest,
+) -> Result<Response, AppError> {
+    let ledger = state.open_ledger().context("POST /api/drafts/:id/action")?;
+    let _lock = WorkspaceLock::acquire(&ledger.paths)?;
+
+    // Read the draft
+    let draft_path = ledger.paths.drafts_dir.join(format!("{draft_id}.json"));
+    if !draft_path.exists() {
+        return Err(AppError::NotFound(format!("draft not found: {draft_id}")));
+    }
+    let content = std::fs::read_to_string(&draft_path)?;
+    let mut draft: serde_json::Value = serde_json::from_str(&content)?;
+
+    // Check draft status
+    let draft_status = draft
+        .get("status")
+        .and_then(|v| v.as_str())
+        .unwrap_or("proposed");
+    if draft_status == "applied" || draft_status == "rejected" {
+        return Err(AppError::Conflict(format!(
+            "draft {draft_id} is already {draft_status}"
+        )));
+    }
+
+    let actor = body.actor.as_deref().unwrap_or("human");
+    let reason = body.reason.as_deref().unwrap_or("");
+    let device_id = headers
+        .get("x-edda-device-id")
+        .and_then(|v| v.to_str().ok());
+
+    // Any action other than "approve" is recorded as a rejection.
+    let decision = if action == "approve" {
+        "approve"
+    } else {
+        "reject"
+    };
+
+    let head = ledger.head_branch()?;
+
+    // Compute draft SHA256
+    let draft_sha256 = {
+        use sha2::Digest;
+        let bytes = std::fs::read(&draft_path)?;
+        let mut hasher = sha2::Sha256::new();
+        hasher.update(&bytes);
+        hex::encode(hasher.finalize())
+    };
+
+    let parent_hash = ledger.last_event_hash()?;
+
+    // Handle stage-aware drafts
+    let stages = draft
+        .get("stages")
+        .and_then(|v| v.as_array())
+        .cloned()
+        .unwrap_or_default();
+
+    let (stage_id, stage_role, stage_status) = if !stages.is_empty() {
+        // Determine which stage to act on
+        let requested_stage = body.stage.as_deref();
+        let target_stage = if let Some(sid) = requested_stage {
+            stages
+                .iter()
+                .find(|s| s.get("stage_id").and_then(|v| v.as_str()) == Some(sid))
+                .ok_or_else(|| AppError::NotFound(format!("stage not found: {sid}")))?
+        } else {
+            // Auto-select the first pending stage
+            stages
+                .iter()
+                .find(|s| s.get("status").and_then(|v| v.as_str()) == Some("pending"))
+                .ok_or_else(|| AppError::Conflict("no pending stages remain".to_string()))?
+        };
+
+        let sid = target_stage
+            .get("stage_id")
+            .and_then(|v| v.as_str())
+            .unwrap_or("")
+            .to_string();
+        let role = target_stage
+            .get("role")
+            .and_then(|v| v.as_str())
+            .unwrap_or("")
+            .to_string();
+        let st_status = target_stage
+            .get("status")
+            .and_then(|v| v.as_str())
+            .unwrap_or("pending")
+            .to_string();
+
+        if st_status != "pending" {
+            return Err(AppError::Conflict(format!(
+                "stage '{sid}' is already {st_status}"
+            )));
+        }
+
+        (sid, role, st_status)
+    } else {
+        (String::new(), String::new(), "pending".to_string())
+    };
+
+    // Replay protection: stage already acted on
+    // NOTE(review): the staged branch above already rejects non-pending
+    // stages, so this only guards the flat-draft path (always "pending").
+    if stage_status != "pending" {
+        return Err(AppError::Conflict(format!(
+            "draft {draft_id} stage '{}' is already {stage_status}",
+            stage_id
+        )));
+    }
+
+    // Create approval event
+    let event = new_approval_event(&ApprovalEventParams {
+        branch: &head,
+        parent_hash: parent_hash.as_deref(),
+        draft_id,
+        draft_sha256: &draft_sha256,
+        decision,
+        actor,
+        note: reason,
+        stage_id: &stage_id,
+        role: &stage_role,
+        device_id,
+    })?;
+    ledger.append_event(&event)?;
+
+    // Update draft JSON
+    let ts = event.ts.clone();
+    let approval_record = serde_json::json!({
+        "ts": ts,
+        "actor": actor,
+        "decision": decision,
+        "note": reason,
+        "approval_event_id": event.event_id,
+        "stage_id": stage_id,
+        "role": stage_role,
+    });
+
+    // Append to approvals array
+    if let Some(approvals) = draft.get_mut("approvals") {
+        if let Some(arr) = approvals.as_array_mut() {
+            arr.push(approval_record);
+        }
+    } else {
+        draft["approvals"] = serde_json::json!([approval_record]);
+    }
+
+    // Update stage status
+    let mut new_stage_status = "pending".to_string();
+    if let Some(stages_arr) = draft.get_mut("stages").and_then(|v| v.as_array_mut()) {
+        for stage in stages_arr.iter_mut() {
+            let sid = stage.get("stage_id").and_then(|v| v.as_str()).unwrap_or("");
+            if sid == stage_id {
+                if decision == "reject" {
+                    stage["status"] = serde_json::Value::String("rejected".to_string());
+                    new_stage_status = "rejected".to_string();
+                } else {
+                    // Read min_approvals first to avoid borrow conflict
+                    let min = stage
+                        .get("min_approvals")
+                        .and_then(|v| v.as_u64())
+                        .unwrap_or(1) as usize;
+                    // Add actor to approved_by
+                    if let Some(ab) = stage.get_mut("approved_by") {
+                        if let Some(arr) = ab.as_array_mut() {
+                            let actor_val = serde_json::Value::String(actor.to_string());
+                            if !arr.contains(&actor_val) {
+                                arr.push(actor_val);
+                            }
+                            if arr.len() >= min {
+                                new_stage_status = "approved".to_string();
+                            }
+                        }
+                    }
+                    if new_stage_status == "approved" {
+                        stage["status"] = serde_json::Value::String("approved".to_string());
+                    }
+                }
+                break;
+            }
+        }
+
+        // Update draft-level status
+        let all_approved = stages_arr
+            .iter()
+            .all(|s| s.get("status").and_then(|v| v.as_str()) == Some("approved"));
+        let any_rejected = stages_arr
+            .iter()
+            .any(|s| s.get("status").and_then(|v| v.as_str()) == Some("rejected"));
+
+        if any_rejected {
+            draft["status"] = serde_json::Value::String("rejected".to_string());
+        } else if all_approved {
+            draft["status"] = serde_json::Value::String("approved".to_string());
+        }
+    } else {
+        // Flat (no stages) draft
+        if decision == "reject" {
+            draft["status"] = serde_json::Value::String("rejected".to_string());
+            new_stage_status = "rejected".to_string();
+        } else {
+            let min = draft
+                .get("policy_min_approvals")
+                .and_then(|v| v.as_u64())
+                .unwrap_or(1) as usize;
+            let count = draft
+                .get("approvals")
+                .and_then(|v| v.as_array())
+                .map(|a| {
+                    a.iter()
+                        .filter(|r| r.get("decision").and_then(|v| v.as_str()) == Some("approve"))
+                        .count()
+                })
+                .unwrap_or(0);
+            if count >= min.max(1) {
+                draft["status"] = serde_json::Value::String("approved".to_string());
+                new_stage_status = "approved".to_string();
+            }
+        }
+    }
+
+    let final_draft_status = draft
+        .get("status")
+        .and_then(|v| v.as_str())
+        .unwrap_or("proposed")
+        .to_string();
+
+    // Write updated draft
+    std::fs::write(&draft_path, serde_json::to_string_pretty(&draft)?)?;
+
+    // Rebuild derived state
+    let snap_branch = ledger.head_branch()?;
+    let _ = edda_derive::rebuild_branch(&ledger, &snap_branch);
+
+    let resp = ApprovalResponse {
+        event_id: event.event_id,
+        draft_status: final_draft_status,
+        stage_status: new_stage_status,
+    };
+
+    Ok((StatusCode::OK, Json(resp)).into_response())
+}
+
+/// Draft-related routes.
+pub(crate) fn routes() -> Router<Arc<AppState>> {
+    Router::new()
+        .route("/api/drafts", get(get_drafts))
+        .route("/api/drafts/{id}/approve", post(post_draft_approve))
+        .route("/api/drafts/{id}/deny", post(post_draft_deny))
+}
diff --git a/crates/edda-serve/src/api/events.rs b/crates/edda-serve/src/api/events.rs
new file mode 100644
index 0000000..6fb1d22
--- /dev/null
+++ b/crates/edda-serve/src/api/events.rs
@@ -0,0 +1,711 @@
+use std::sync::Arc;
+
+use anyhow::Context;
+use axum::extract::rejection::JsonRejection;
+use axum::extract::{Path as AxumPath, Query, State};
+use axum::http::StatusCode;
+use axum::response::{IntoResponse, Response};
+use axum::routing::{get, post};
+use axum::{Json, Router};
+use serde::{Deserialize, Serialize};
+
+use edda_core::event::{finalize_event, new_decision_event, new_execution_event, new_note_event};
+use edda_core::types::{rel, DecisionPayload, Provenance};
+use edda_derive::{rebuild_branch, render_context, DeriveOptions};
+use edda_ledger::lock::WorkspaceLock;
+
+use crate::error::AppError;
+use crate::state::AppState;
+
+// ── Health ──
+
+/// Liveness probe; always responds `{ "ok": true }`.
+async fn health() -> Json<serde_json::Value> {
+    Json(serde_json::json!({ "ok": true }))
+}
+
+// ── GET /api/status ──
+
+#[derive(Serialize)]
+struct StatusResponse {
+    branch: String,
+    last_commit: Option<LastCommit>,
+    uncommitted_events: usize,
+}
+
+#[derive(Serialize)]
+struct LastCommit {
+    ts: String,
+    event_id: String,
+    title: String,
+}
+
+async fn get_status(State(state): State<Arc<AppState>>) -> Result<Json<StatusResponse>, AppError> {
+    let ledger = state.open_ledger().context("GET /api/status")?;
+    let head = ledger.head_branch()?;
+    let snap = rebuild_branch(&ledger, &head)?;
+
+    let last_commit = snap.last_commit.as_ref().map(|c| LastCommit {
+        ts: c.ts.clone(),
+        event_id: c.event_id.clone(),
+        title: c.title.clone(),
+    });
+
+    Ok(Json(StatusResponse {
+        branch: head,
+        last_commit,
+        uncommitted_events: snap.uncommitted_events,
+    }))
+}
+
+// ── GET /api/context ──
+
+#[derive(Deserialize)]
+struct ContextQuery {
+    // NOTE(review): generic parameter lost in patch mangling; reconstructed
+    // as usize to match DeriveOptions { depth } — confirm against edda_derive.
+    depth: Option<usize>,
+}
+
+#[derive(Serialize)]
+struct ContextResponse {
+    context: String,
+}
+
+async fn get_context(
+    State(state): State<Arc<AppState>>,
+    Query(params): Query<ContextQuery>,
+) -> Result<Json<ContextResponse>, AppError> {
+    let ledger = state.open_ledger().context("GET /api/context")?;
+    let head = ledger.head_branch()?;
+    let depth = params.depth.unwrap_or(5);
+    let text = render_context(&ledger, &head, DeriveOptions { depth })?;
+    Ok(Json(ContextResponse { context: text }))
+}
+
+// ── GET /api/decisions ──
+
+#[derive(Deserialize)]
+struct DecisionsQuery {
+    q: Option<String>,
+    context_summary: Option<String>,
+    limit: Option<usize>,
+    all: Option<bool>,
+    branch: Option<String>,
+    /// ISO 8601 lower bound (inclusive) for temporal filtering.
+    after: Option<String>,
+    /// ISO 8601 upper bound (inclusive) for temporal filtering.
+    before: Option<String>,
+    /// Comma-separated tags to filter by (OR semantics).
+    tags: Option<String>,
+    /// Filter decisions belonging to a specific village.
+    village_id: Option<String>,
+}
+
+// NOTE(review): the response payload type was lost in patch mangling; the
+// handler returns whatever edda_ask::ask yields — reconstructed here as
+// edda_ask::AskResult. Confirm the name against the edda_ask crate.
+async fn get_decisions(
+    State(state): State<Arc<AppState>>,
+    Query(params): Query<DecisionsQuery>,
+) -> Result<Json<edda_ask::AskResult>, AppError> {
+    if let Some(ref after) = params.after {
+        crate::helpers::validate_iso8601(after).map_err(AppError::Validation)?;
+    }
+    if let Some(ref before) = params.before {
+        crate::helpers::validate_iso8601(before).map_err(AppError::Validation)?;
+    }
+
+    let ledger = state.open_ledger().context("GET /api/decisions")?;
+    let q = params
+        .q
+        .as_deref()
+        .or(params.context_summary.as_deref())
+        .unwrap_or("");
+    let tags: Vec<String> = params
+        .tags
+        .as_deref()
+        .filter(|s| !s.is_empty())
+        .map(|s| s.split(',').map(|t| t.trim().to_string()).collect())
+        .unwrap_or_default();
+    let opts = edda_ask::AskOptions {
+        limit: params.limit.unwrap_or(20),
+        include_superseded: params.all.unwrap_or(false),
+        branch: params.branch,
+        impact: false,
+        after: params.after,
+        before: params.before,
+        tags,
+        village_id: params.village_id,
+    };
+    let result = edda_ask::ask(&ledger, q, &opts, None)?;
+    Ok(Json(result))
+}
+
+// ── POST /api/decisions/batch ──
+
+#[derive(Deserialize)]
+struct BatchQuery {
+    queries: Vec<BatchSubQuery>,
+    #[serde(default)]
+    slim: bool,
+}
+
+#[derive(Deserialize)]
+struct BatchSubQuery {
+    #[serde(default)]
+    q: Option<String>,
+    #[serde(default)]
+    context_summary: Option<String>,
+    #[serde(default)]
+    domain: Option<String>,
+    #[serde(default)]
+    limit: Option<usize>,
+    #[serde(default)]
+    branch: Option<String>,
+    #[serde(default)]
+    all: Option<bool>,
+}
+
+#[derive(Serialize)]
+struct BatchResponse {
+    results: Vec<BatchSubResult>,
+}
+
+// NOTE(review): the element types of the Option<Vec<…>> fields were lost in
+// patch mangling. They mirror the fields of the edda_ask result moved in
+// below; the names used here are reconstructions — confirm each against the
+// edda_ask crate before merging.
+#[derive(Serialize)]
+struct BatchSubResult {
+    query_index: usize,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    decisions: Option<Vec<edda_ask::Decision>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    timeline: Option<Vec<edda_ask::TimelineEntry>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    related_commits: Option<Vec<edda_ask::RelatedCommit>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    related_notes: Option<Vec<edda_ask::RelatedNote>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    conversations: Option<Vec<edda_ask::Conversation>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    error: Option<String>,
+}
+
+async fn post_decisions_batch(
+    State(state): State<Arc<AppState>>,
+    body: Result<Json<BatchQuery>, JsonRejection>,
+) -> Result<Json<BatchResponse>, AppError> {
+    let Json(body) = body.map_err(|e| AppError::Validation(e.body_text()))?;
+
+    if body.queries.is_empty() || body.queries.len() > 10 {
+        return Err(AppError::Validation(
+            "queries must contain 1\u{2013}10 items".into(),
+        ));
+    }
+
+    let ledger = state.open_ledger().context("POST /api/decisions/batch")?;
+    let mut results = Vec::with_capacity(body.queries.len());
+
+    for (i, sub) in body.queries.iter().enumerate() {
+        // First non-empty of q / context_summary / domain is the query text.
+        let q = sub
+            .q
+            .as_deref()
+            .or(sub.context_summary.as_deref())
+            .or(sub.domain.as_deref())
+            .unwrap_or("");
+
+        let opts = edda_ask::AskOptions {
+            limit: sub.limit.unwrap_or(20).min(100),
+            include_superseded: sub.all.unwrap_or(false),
+            branch: sub.branch.clone(),
+            impact: false,
+            after: None,
+            before: None,
+            tags: vec![],
+            village_id: None,
+        };
+
+        match edda_ask::ask(&ledger, q, &opts, None) {
+            Ok(result) => {
+                if body.slim {
+                    results.push(BatchSubResult {
+                        query_index: i,
+                        decisions: Some(result.decisions),
+                        timeline: None,
+                        related_commits: None,
+                        related_notes: None,
+                        conversations: None,
+                        error: None,
+                    });
+                } else {
+                    results.push(BatchSubResult {
+                        query_index: i,
+                        decisions: Some(result.decisions),
+                        timeline: Some(result.timeline),
+                        related_commits: Some(result.related_commits),
+                        related_notes: Some(result.related_notes),
+                        conversations: Some(result.conversations),
+                        error: None,
+                    });
+                }
+            }
+            Err(e) => {
+                // Per-query failure is reported inline, not as a 5xx.
+                results.push(BatchSubResult {
+                    query_index: i,
+                    decisions: None,
+                    timeline: None,
+                    related_commits: None,
+                    related_notes: None,
+                    conversations: None,
+                    error: Some(e.to_string()),
+                });
+            }
+        }
+    }
+
+    Ok(Json(BatchResponse { results }))
+}
+
+// ── GET /api/decisions/:event_id/outcomes ──
+
+async fn get_decision_outcomes(
+    State(state): State<Arc<AppState>>,
+    AxumPath(event_id): AxumPath<String>,
+) -> Result<Response, AppError> {
+    let ledger = state
+        .open_ledger()
+        .context("GET /api/decisions/:id/outcomes")?;
+    let outcomes = ledger.decision_outcomes(&event_id)?;
+
+    match outcomes {
+        Some(metrics) => {
+            let json = serde_json::to_value(metrics)?;
+            Ok(Json(json).into_response())
+        }
+        None => Err(AppError::NotFound(format!(
+            "decision not found: {}",
+            event_id
+        ))),
+    }
+}
+
+// ── GET /api/decisions/:event_id/chain ──
+
+#[derive(Deserialize)]
+struct ChainQuery {
+    depth: Option<usize>,
+}
+
+#[derive(Serialize)]
+struct ChainResponse {
+    root: ChainNodeResponse,
+    chain: Vec<ChainNodeResponse>,
+    meta: ChainMeta,
+}
+
+#[derive(Serialize)]
+struct ChainNodeResponse {
+    event_id: String,
+    key: String,
+    value: String,
+    reason: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    relation: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    depth: Option<usize>,
+    ts: String,
+    is_active: bool,
+}
+
+#[derive(Serialize)]
+struct ChainMeta {
+    max_depth: usize,
+    total_nodes: usize,
+}
+
+async fn get_decision_chain(
+    State(state): State<Arc<AppState>>,
+    AxumPath(event_id): AxumPath<String>,
+    Query(params): Query<ChainQuery>,
+) -> Result<Json<ChainResponse>, AppError> {
+    // Depth is clamped to 10 to bound the traversal.
+    let depth = params.depth.unwrap_or(3).min(10);
+    let ledger = state
+        .open_ledger()
+        .context("GET /api/decisions/:id/chain")?;
+
+    let (root, chain) = ledger
+        .causal_chain(&event_id, depth)?
+        .ok_or_else(|| AppError::NotFound(format!("decision not found: {}", event_id)))?;
+
+    let root_node = ChainNodeResponse {
+        event_id: root.event_id,
+        key: root.key,
+        value: root.value,
+        reason: root.reason,
+        relation: None,
+        depth: None,
+        ts: root.ts.unwrap_or_default(),
+        is_active: root.is_active,
+    };
+
+    let chain_nodes: Vec<ChainNodeResponse> = chain
+        .into_iter()
+        .map(|entry| ChainNodeResponse {
+            event_id: entry.decision.event_id,
+            key: entry.decision.key,
+            value: entry.decision.value,
+            reason: entry.decision.reason,
+            relation: Some(entry.relation),
+            depth: Some(entry.depth),
+            ts: entry.decision.ts.unwrap_or_default(),
+            is_active: entry.decision.is_active,
+        })
+        .collect();
+
+    let total_nodes = 1 + chain_nodes.len();
+    Ok(Json(ChainResponse {
+        root: root_node,
+        chain: chain_nodes,
+        meta: ChainMeta {
+            max_depth: depth,
+            total_nodes,
+        },
+    }))
+}
+
+// ── GET /api/log ──
+
+#[derive(Deserialize)]
+struct LogQuery {
+    r#type: Option<String>,
+    keyword: Option<String>,
+    after: Option<String>,
+    before: Option<String>,
+    limit: Option<usize>,
+}
+
+#[derive(Serialize)]
+struct LogEntry {
+    ts: String,
+    #[serde(rename = "type")]
+    event_type: String,
+    event_id: String,
+    branch: String,
+    #[serde(rename = "summary")]
+    detail: String,
+    tags: Vec<String>,
+}
+
+#[derive(Serialize)]
+struct LogResponse {
+    events: Vec<LogEntry>,
+}
+
+async fn get_log(
+    State(state): State<Arc<AppState>>,
+    Query(params): Query<LogQuery>,
+) -> Result<Json<LogResponse>, AppError> {
+    let ledger = state.open_ledger().context("GET /api/log")?;
+    let head = ledger.head_branch()?;
+    let limit = params.limit.unwrap_or(50);
+
+    let events = ledger.iter_events_filtered(
+        &head,
+        params.r#type.as_deref(),
+        params.keyword.as_deref(),
+        params.after.as_deref(),
+        params.before.as_deref(),
+        limit,
+    )?;
+
+    let results: Vec<LogEntry> = events
+        .iter()
+        .map(|e| {
+            // Summary preference: payload "text", then "title", else empty.
+            let detail = e
+                .payload
+                .get("text")
+                .and_then(|v| v.as_str())
+                .or_else(|| e.payload.get("title").and_then(|v| v.as_str()))
+                .unwrap_or("")
+                .to_string();
+            let tags: Vec<String> = e
+                .payload
+                .get("tags")
+                .and_then(|v| v.as_array())
+                .map(|arr| {
+                    arr.iter()
+                        .filter_map(|v| v.as_str().map(String::from))
+                        .collect()
+                })
+                .unwrap_or_default();
+            LogEntry {
+                ts: e.ts.clone(),
+                event_type: e.event_type.clone(),
+                event_id: e.event_id.clone(),
+                branch: e.branch.clone(),
+                detail,
+                tags,
+            }
+        })
+        .collect();
+
+    Ok(Json(LogResponse { events: results }))
+}
+
+// ── POST /api/note ──
+
+#[derive(Deserialize)]
+struct NoteBody {
+    text: String,
+    role: Option<String>,
+    tags: Option<Vec<String>>,
+}
+
+#[derive(Serialize)]
+struct EventResponse {
+    event_id: String,
+}
+
+async fn post_note(
+    State(state): State<Arc<AppState>>,
+    body: Result<Json<NoteBody>, JsonRejection>,
+) -> Result<impl IntoResponse, AppError> {
+    let Json(body) = body.map_err(|e| AppError::Validation(e.body_text()))?;
+
+    let ledger = state.open_ledger().context("POST /api/note")?;
+    let _lock = WorkspaceLock::acquire(&ledger.paths)?;
+
+    let branch = ledger.head_branch()?;
+    let parent_hash = ledger.last_event_hash()?;
+    let role = body.role.as_deref().unwrap_or("user");
+    let tags = body.tags.unwrap_or_default();
+
+    let event = new_note_event(&branch, parent_hash.as_deref(), role, &body.text, &tags)?;
+    ledger.append_event(&event)?;
+
+    Ok((
+        StatusCode::CREATED,
+        Json(EventResponse {
+            event_id: event.event_id,
+        }),
+    ))
+}
+
+// ── POST /api/decide ──
+
+#[derive(Deserialize)]
+struct DecideBody {
+    decision: String,
+    reason: Option<String>,
+}
+
+#[derive(Serialize)]
+struct DecideResponse {
+    event_id: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    superseded: Option<String>,
+}
+
+async fn post_decide(
+    State(state): State<Arc<AppState>>,
+    body: Result<Json<DecideBody>, JsonRejection>,
+) -> Result<impl IntoResponse, AppError> {
+    let Json(body) = body.map_err(|e| AppError::Validation(e.body_text()))?;
+
+    let (key, value) = body.decision.split_once('=').ok_or_else(|| {
+        AppError::Validation(
+            "decision must be in key=value format (e.g. \"db.engine=postgres\")".into(),
+        )
+    })?;
+    let key = key.trim();
+    let value = value.trim();
+
+    let ledger = state.open_ledger().context("POST /api/decide")?;
+    let _lock = WorkspaceLock::acquire(&ledger.paths)?;
+
+    let branch = ledger.head_branch()?;
+    let parent_hash = ledger.last_event_hash()?;
+
+    let dp = DecisionPayload {
+        key: key.to_string(),
+        value: value.to_string(),
+        reason: body.reason,
+        scope: None,
+        authority: None,
+        affected_paths: None,
+        tags: None,
+        review_after: None,
+        reversibility: None,
+        village_id: None,
+    };
+    let mut event = new_decision_event(&branch, parent_hash.as_deref(), "system", &dp)?;
+
+    // Auto-supersede: find prior decision with same key via SQL index
+    let prior = ledger.find_active_decision(&branch, key)?;
+    let mut superseded = None;
+    if let Some(ref row) = prior {
+        if row.value != value {
+            superseded = Some(row.event_id.clone());
+            event.refs.provenance.push(Provenance {
+                target: row.event_id.clone(),
+                rel: rel::SUPERSEDES.to_string(),
+                note: Some(format!("key '{}' re-decided", key)),
+            });
+        }
+    }
+
+    finalize_event(&mut event)?;
+    ledger.append_event(&event)?;
+
+    Ok((
+        StatusCode::CREATED,
+        Json(DecideResponse {
+            event_id: event.event_id,
+            superseded,
+        }),
+    ))
+}
+
+// ── POST /api/events/karvi ──
+
+#[derive(Deserialize)]
+struct KarviEventBody {
+    version: String,
+    event_id: String,
+    event_type: String,
+    occurred_at: String,
+    #[serde(default)]
+    trace_id: Option<String>,
+    #[serde(default)]
+    task_id: Option<String>,
+    #[serde(default)]
+    step_id: Option<String>,
+    #[serde(default)]
+    project: Option<String>,
+    #[serde(default)]
+    runtime: Option<String>,
+    #[serde(default)]
+    model: Option<String>,
+    #[serde(default)]
+    actor: Option<String>,
+    // NOTE(review): inner types lost in patch mangling; usage/result are
+    // passed straight into a json! payload, so serde_json::Value fits —
+    // confirm against the karvi.event.v1 schema.
+    #[serde(default)]
+    usage: Option<serde_json::Value>,
+    #[serde(default)]
+    result: Option<serde_json::Value>,
+    #[serde(default)]
+    decision_ref: Option<String>,
+}
+
+#[derive(Serialize)]
+struct KarviEventResponse {
+    event_id: String,
+    status: String,
+}
+
+const VALID_KARVI_EVENT_TYPES: &[&str] = &["step_completed", "step_failed", "step_cancelled"];
+
+async fn post_karvi_event(
+    State(state): State<Arc<AppState>>,
+    Json(body): Json<KarviEventBody>,
+) -> Result<Response, AppError> {
+    // Validate version
+    if body.version != "karvi.event.v1" {
+        let err = serde_json::json!({
+            "error": format!("unsupported version: {}", body.version),
+        });
+        return Ok((StatusCode::BAD_REQUEST, Json(err)).into_response());
+    }
+
+    // Validate event_type
+    if !VALID_KARVI_EVENT_TYPES.contains(&body.event_type.as_str()) {
+        let err = serde_json::json!({
+            "error": format!("unsupported event_type: {}", body.event_type),
+        });
+        return Ok((StatusCode::BAD_REQUEST, Json(err)).into_response());
+    }
+
+    // Serialize full body as payload
+    let payload = serde_json::json!({
+        "version": body.version,
+        "event_id": body.event_id,
+        "event_type": body.event_type,
+        "occurred_at": body.occurred_at,
+        "trace_id": body.trace_id,
+        "task_id": body.task_id,
+        "step_id": body.step_id,
+        "project": body.project,
+        "runtime": body.runtime,
+        "model": body.model,
+        "actor": body.actor,
+        "usage": body.usage,
+        "result": body.result,
+        "decision_ref": body.decision_ref,
+    });
+
+    let ledger = state.open_ledger().context("POST /api/events/karvi")?;
+    let _lock = WorkspaceLock::acquire(&ledger.paths)?;
+    let branch = ledger.head_branch()?;
+    let parent_hash = ledger.last_event_hash()?;
+
+    let event = new_execution_event(
+        &branch,
+        parent_hash.as_deref(),
+        &body.event_id,
+        &body.occurred_at,
+        payload,
+        body.decision_ref.as_deref(),
+    )?;
+
+    // Idempotent append: replays of the same external event_id are accepted
+    // but reported as "duplicate" with 200 instead of 201.
+    let inserted = ledger.append_event_idempotent(&event)?;
+
+    let response = KarviEventResponse {
+        event_id: event.event_id,
+        status: if inserted {
+            "created".to_string()
+        } else {
+            "duplicate".to_string()
+        },
+    };
+
+    let status = if inserted {
+        StatusCode::CREATED
+    } else {
+        StatusCode::OK
+    };
+
+    Ok((status, Json(response)).into_response())
+}
+
+/// Public event routes (no auth required).
+pub(crate) fn public_routes() -> Router<Arc<AppState>> {
+    Router::new().route("/api/health", get(health))
+}
+
+/// Protected event routes (auth middleware applied).
+pub(crate) fn protected_routes() -> Router<Arc<AppState>> {
+    Router::new()
+        .route("/api/status", get(get_status))
+        .route("/api/context", get(get_context))
+        .route("/api/decisions", get(get_decisions))
+        .route("/api/decisions/batch", post(post_decisions_batch))
+        .route(
+            "/api/decisions/{event_id}/outcomes",
+            get(get_decision_outcomes),
+        )
+        .route("/api/decisions/{event_id}/chain", get(get_decision_chain))
+        .route("/api/log", get(get_log))
+        .route("/api/note", post(post_note))
+        .route("/api/decide", post(post_decide))
+        .route("/api/events/karvi", post(post_karvi_event))
+}
+
+/// All event routes (for test router without auth middleware).
+#[cfg(test)]
+pub(crate) fn routes() -> Router<Arc<AppState>> {
+    Router::new()
+        .route("/api/health", get(health))
+        .route("/api/status", get(get_status))
+        .route("/api/context", get(get_context))
+        .route("/api/decisions", get(get_decisions))
+        .route("/api/decisions/batch", post(post_decisions_batch))
+        .route(
+            "/api/decisions/{event_id}/outcomes",
+            get(get_decision_outcomes),
+        )
+        .route("/api/decisions/{event_id}/chain", get(get_decision_chain))
+        .route("/api/log", get(get_log))
+        .route("/api/note", post(post_note))
+        .route("/api/decide", post(post_decide))
+        .route("/api/events/karvi", post(post_karvi_event))
+}
diff --git a/crates/edda-serve/src/api/ingestion.rs b/crates/edda-serve/src/api/ingestion.rs
new file mode 100644
index 0000000..fe72504
--- /dev/null
+++ b/crates/edda-serve/src/api/ingestion.rs
@@ -0,0 +1,309 @@
+use std::sync::Arc;
+
+use axum::extract::rejection::JsonRejection;
+use axum::extract::{Path as AxumPath, Query, State};
+use axum::http::StatusCode;
+use axum::response::{IntoResponse, Response};
+use axum::routing::{get, post};
+use axum::{Json, Router};
+use serde::{Deserialize, Serialize};
+
+use edda_ledger::lock::WorkspaceLock;
+
+use crate::error::AppError;
+use crate::state::AppState;
+
+// ── Ingestion types ──
+
+#[derive(Deserialize)]
+#[serde(rename_all = "camelCase")]
+struct EvaluateBody {
+    event_type: String,
+    source_layer: String,
+    #[serde(default)]
+    source_refs: Vec<String>,
+    #[serde(default)]
+    summary: Option<String>,
+    #[serde(default)]
+    detail: Option<serde_json::Value>,
+    #[serde(default)]
+    tags: Vec<String>,
+}
+
+#[derive(Serialize)]
+#[serde(rename_all = "camelCase")]
+struct EvaluateResponse {
+    action: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    record_id: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    suggestion_id: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    reason: Option<String>,
+}
+
+#[derive(Deserialize)]
+#[serde(rename_all = "camelCase")]
+struct ManualIngestBody {
+    event_type: String,
+    source_layer: String,
+    #[serde(default)]
+    source_refs: Vec<String>,
+    summary: String,
+    detail: serde_json::Value,
+    #[serde(default)]
+    tags: Vec<String>,
+}
+
+#[derive(Deserialize)]
+#[serde(rename_all = "camelCase")]
+struct IngestionRecordsQuery {
+    #[serde(default)]
+    limit: Option<usize>,
+    #[serde(default)]
+    source_layer: Option<String>,
+    #[serde(default)]
+    trigger_type: Option<String>,
+}
+
+// ── Ingestion handlers ──
+
+// POST /api/ingestion/evaluate
+//
+// Evaluates the trigger policy for an incoming event and either auto-ingests
+// it (201), queues a suggestion for human review (200 "queued"), or skips it
+// (200 "skipped").
+async fn post_ingestion_evaluate(
+    State(state): State<Arc<AppState>>,
+    body: Result<Json<EvaluateBody>, JsonRejection>,
+) -> Result<Response, AppError> {
+    let Json(body) = body.map_err(|e| AppError::Validation(e.body_text()))?;
+
+    let layer: edda_ingestion::SourceLayer = body
+        .source_layer
+        .parse()
+        .map_err(AppError::Validation)?;
+
+    let result = edda_ingestion::evaluate_trigger(&body.event_type, &body.source_layer);
+
+    match result {
+        edda_ingestion::TriggerResult::AutoIngest => {
+            let ledger = state.open_ledger()?;
+            let _lock = WorkspaceLock::acquire(&ledger.paths)?;
+
+            let summary = body
+                .summary
+                .unwrap_or_else(|| format!("{} from {}", body.event_type, body.source_layer));
+            let record = edda_ingestion::IngestionRecord {
+                id: edda_ingestion::IngestionRecord::new_id("prec"),
+                trigger_type: edda_ingestion::TriggerType::Auto,
+                event_type: body.event_type,
+                source_layer: layer,
+                source_refs: body.source_refs,
+                summary,
+                detail: body.detail.unwrap_or(serde_json::json!({})),
+                tags: body.tags,
+                created_at: crate::helpers::time_now_rfc3339(),
+            };
+
+            edda_ingestion::write_ingestion_record(&ledger, &record)?;
+
+            Ok((
+                StatusCode::CREATED,
+                Json(EvaluateResponse {
+                    action: "ingested".to_string(),
+                    record_id: Some(record.id),
+                    suggestion_id: None,
+                    reason: None,
+                }),
+            )
+                .into_response())
+        }
+        edda_ingestion::TriggerResult::SuggestIngest { reason } => {
+            let ledger = state.open_ledger()?;
+            let _lock = WorkspaceLock::acquire(&ledger.paths)?;
+
+            let summary = body
+                .summary
+                .unwrap_or_else(|| format!("{} from {}", body.event_type, body.source_layer));
+            let suggestion = edda_ingestion::Suggestion {
+                id: edda_ingestion::Suggestion::new_id(),
+                event_type: body.event_type,
+                source_layer: layer,
+                source_refs: body.source_refs,
+                summary,
+                suggested_because: reason.clone(),
+                detail: body.detail.unwrap_or(serde_json::json!({})),
+                tags: body.tags,
+                status: edda_ingestion::SuggestionStatus::Pending,
+                created_at: crate::helpers::time_now_rfc3339(),
+                reviewed_at: None,
+            };
+
+            let queue = edda_ingestion::SuggestionQueue::new(&ledger);
+            let id = queue.enqueue(&suggestion)?;
+
+            Ok((
+                StatusCode::OK,
+                Json(EvaluateResponse {
+                    action: "queued".to_string(),
+                    record_id: None,
+                    suggestion_id: Some(id),
+                    reason: Some(reason),
+                }),
+            )
+                .into_response())
+        }
+        edda_ingestion::TriggerResult::Skip => Ok((
+            StatusCode::OK,
+            Json(EvaluateResponse {
+                action: "skipped".to_string(),
+                record_id: None,
+                suggestion_id: None,
+                reason: None,
+            }),
+        )
+            .into_response()),
+    }
+}
+
+// POST /api/ingestion/records
+//
+// Manually ingest a record, bypassing trigger evaluation.
+async fn post_ingestion_record(
+    State(state): State<Arc<AppState>>,
+    body: Result<Json<ManualIngestBody>, JsonRejection>,
+) -> Result<impl IntoResponse, AppError> {
+    let Json(body) = body.map_err(|e| AppError::Validation(e.body_text()))?;
+
+    let layer: edda_ingestion::SourceLayer = body
+        .source_layer
+        .parse()
+        .map_err(AppError::Validation)?;
+
+    let ledger = state.open_ledger()?;
+    let _lock = WorkspaceLock::acquire(&ledger.paths)?;
+
+    let record = edda_ingestion::IngestionRecord {
+        id: edda_ingestion::IngestionRecord::new_id("prec"),
+        trigger_type: edda_ingestion::TriggerType::Manual,
+        event_type: body.event_type,
+        source_layer: layer,
+        source_refs: body.source_refs,
+        summary: body.summary,
+        detail: body.detail,
+        tags: body.tags,
+        created_at: crate::helpers::time_now_rfc3339(),
+    };
+
+    edda_ingestion::write_ingestion_record(&ledger, &record)?;
+
+    Ok((
+        StatusCode::CREATED,
+        Json(serde_json::json!({ "recordId": record.id })),
+    ))
+}
+
+// GET /api/ingestion/records
+async fn get_ingestion_records(
+    State(state): State<Arc<AppState>>,
+    Query(params): Query<IngestionRecordsQuery>,
+) -> Result<Json<Vec<edda_ingestion::IngestionRecord>>, AppError> {
+    let ledger = state.open_ledger()?;
+    let events = ledger.iter_events_by_type("ingestion")?;
+
+    // Payloads that fail to deserialize are silently dropped.
+    let mut records: Vec<edda_ingestion::IngestionRecord> = events
+        .into_iter()
+        .filter_map(|e| serde_json::from_value(e.payload).ok())
+        .collect();
+
+    if let Some(ref layer) = params.source_layer {
+        records.retain(|r| r.source_layer.to_string() == *layer);
+    }
+    if let Some(ref tt) = params.trigger_type {
+        records.retain(|r| {
+            let label = match r.trigger_type {
+                edda_ingestion::TriggerType::Auto => "auto",
+                edda_ingestion::TriggerType::Suggested => "suggested",
+                edda_ingestion::TriggerType::Manual => "manual",
+            };
+            label == tt.as_str()
+        });
+    }
+
+    let limit = params.limit.unwrap_or(50);
+    records.truncate(limit);
+
+    Ok(Json(records))
+}
+
+// GET /api/ingestion/suggestions
+//
+// NOTE(review): the element type was lost in patch mangling; reconstructed
+// from `queue.list_pending()` as edda_ingestion::Suggestion — confirm.
+async fn get_ingestion_suggestions(
+    State(state): State<Arc<AppState>>,
+) -> Result<Json<Vec<edda_ingestion::Suggestion>>, AppError> {
+    let ledger = state.open_ledger()?;
+    let queue = edda_ingestion::SuggestionQueue::new(&ledger);
+    let pending = queue.list_pending()?;
+    Ok(Json(pending))
+}
+
+// POST /api/ingestion/suggestions/{id}/accept
+//
+// NOTE(review): response type lost in patch mangling; reconstructed from
+// `queue.accept` as edda_ingestion::IngestionRecord — confirm.
+async fn post_suggestion_accept(
+    State(state): State<Arc<AppState>>,
+    AxumPath(id): AxumPath<String>,
+) -> Result<Json<edda_ingestion::IngestionRecord>, AppError> {
+    let ledger = state.open_ledger()?;
+    let _lock = WorkspaceLock::acquire(&ledger.paths)?;
+
+    // Pre-check for proper HTTP error codes
+    let row = ledger
+        .get_suggestion(&id)?
+        .ok_or_else(|| AppError::NotFound(format!("suggestion not found: {id}")))?;
+    if row.status != "pending" {
+        return Err(AppError::Conflict(format!(
+            "suggestion {id} has status '{}', expected 'pending'",
+            row.status
+        )));
+    }
+
+    let queue = edda_ingestion::SuggestionQueue::new(&ledger);
+    let record = queue.accept(&id)?;
+    Ok(Json(record))
+}
+
+// POST /api/ingestion/suggestions/{id}/reject
+async fn post_suggestion_reject(
+    State(state): State<Arc<AppState>>,
+    AxumPath(id): AxumPath<String>,
+) -> Result<Json<serde_json::Value>, AppError> {
+    let ledger = state.open_ledger()?;
+    let _lock = WorkspaceLock::acquire(&ledger.paths)?;
+
+    // Pre-check for proper HTTP error codes
+    let row = ledger
+        .get_suggestion(&id)?
+        .ok_or_else(|| AppError::NotFound(format!("suggestion not found: {id}")))?;
+    if row.status != "pending" {
+        return Err(AppError::Conflict(format!(
+            "suggestion {id} has status '{}', expected 'pending'",
+            row.status
+        )));
+    }
+
+    let queue = edda_ingestion::SuggestionQueue::new(&ledger);
+    queue.reject(&id)?;
+    Ok(Json(serde_json::json!({ "ok": true })))
+}
+
+/// Ingestion routes.
+pub(crate) fn routes() -> Router> { + Router::new() + .route("/api/ingestion/evaluate", post(post_ingestion_evaluate)) + .route( + "/api/ingestion/records", + post(post_ingestion_record).get(get_ingestion_records), + ) + .route("/api/ingestion/suggestions", get(get_ingestion_suggestions)) + .route( + "/api/ingestion/suggestions/{id}/accept", + post(post_suggestion_accept), + ) + .route( + "/api/ingestion/suggestions/{id}/reject", + post(post_suggestion_reject), + ) +} diff --git a/crates/edda-serve/src/api/metrics.rs b/crates/edda-serve/src/api/metrics.rs new file mode 100644 index 0000000..24ccaa1 --- /dev/null +++ b/crates/edda-serve/src/api/metrics.rs @@ -0,0 +1,369 @@ +use std::sync::Arc; + +use anyhow::Context; +use axum::extract::rejection::JsonRejection; +use axum::extract::{Path as AxumPath, Query, State}; +use axum::routing::{get, post}; +use axum::{Json, Router}; +use serde::{Deserialize, Serialize}; + +use edda_aggregate::aggregate::{per_project_metrics, DateRange, ProjectMetrics}; +use edda_aggregate::controls::evaluate_controls_rules; +use edda_aggregate::quality::{model_quality_from_events, QualityReport}; +use edda_aggregate::rollup; +use edda_store::registry::list_projects; + +use crate::error::AppError; +use crate::state::AppState; + +use super::dashboard::DashboardPeriod; + +// ── GET /api/metrics/quality ── + +#[derive(Deserialize)] +struct QualityQuery { + after: Option, + before: Option, +} + +async fn get_quality_metrics( + State(state): State>, + Query(params): Query, +) -> Result, AppError> { + let range = DateRange { + after: params.after, + before: params.before, + }; + let ledger = state.open_ledger().context("GET /api/metrics/quality")?; + let events = ledger.iter_events_by_type("execution_event")?; + let report = model_quality_from_events(&events, &range); + Ok(Json(report)) +} + +// ── GET /api/controls/suggestions ── + +#[derive(Deserialize)] +struct ControlsSuggestionsQuery { + after: Option, + before: Option, + min_samples: Option, 
+} + +#[derive(Serialize)] +struct ControlsSuggestionsResponse { + suggestions: Vec, + quality: QualityReport, +} + +async fn get_controls_suggestions( + State(state): State>, + Query(params): Query, +) -> Result, AppError> { + let range = DateRange { + after: params.after, + before: params.before, + }; + let ledger = state + .open_ledger() + .context("GET /api/controls/suggestions")?; + let events = ledger.iter_events_by_type("execution_event")?; + let report = model_quality_from_events(&events, &range); + + let rules = edda_bridge_claude::controls_suggest::load_rules(); + let suggestions = evaluate_controls_rules(&rules, &report, params.min_samples); + + Ok(Json(ControlsSuggestionsResponse { + suggestions, + quality: report, + })) +} + +// ── GET /api/controls/patches ── + +#[derive(Deserialize)] +struct ControlsPatchesQuery { + status: Option, +} + +async fn get_controls_patches( + State(state): State>, + Query(params): Query, +) -> Result>, AppError> { + let project_id = edda_store::project_id(&state.repo_root); + + let status_filter = match params.status.as_deref() { + Some("pending") => Some(edda_bridge_claude::controls_suggest::PatchStatus::Pending), + Some("approved") => Some(edda_bridge_claude::controls_suggest::PatchStatus::Approved), + Some("dismissed") => Some(edda_bridge_claude::controls_suggest::PatchStatus::Dismissed), + Some("applied") => Some(edda_bridge_claude::controls_suggest::PatchStatus::Applied), + Some(s) => { + return Err(AppError::Validation(format!( + "Unknown status: {s} (expected: pending, approved, dismissed, applied)" + ))); + } + None => None, + }; + + let patches = + edda_bridge_claude::controls_suggest::list_patches(&project_id, status_filter.as_ref())?; + Ok(Json(patches)) +} + +// ── POST /api/controls/patches/{patch_id}/approve ── + +#[derive(Deserialize)] +struct ApprovePatchBody { + #[serde(default = "default_approve_actor")] + by: String, +} + +fn default_approve_actor() -> String { + "api".to_string() +} + +async fn 
post_approve_controls_patch( + State(state): State>, + AxumPath(patch_id): AxumPath, + body: Result, JsonRejection>, +) -> Result, AppError> { + let project_id = edda_store::project_id(&state.repo_root); + let by = match body { + Ok(Json(b)) => b.by, + Err(_) => "api".to_string(), + }; + + let patch = edda_bridge_claude::controls_suggest::approve_patch(&project_id, &patch_id, &by)?; + Ok(Json(patch)) +} + +// ── GET /api/metrics/overview ── + +fn default_overview_days() -> usize { + 30 +} + +#[derive(Deserialize)] +struct MetricsOverviewQuery { + #[serde(default = "default_overview_days")] + days: usize, + group: Option, +} + +#[derive(Serialize)] +struct MetricsOverviewResponse { + period: DashboardPeriod, + projects: Vec, + totals: MetricsTotals, +} + +#[derive(Serialize)] +struct MetricsTotals { + total_cost_usd: f64, + total_events: usize, + total_commits: usize, + total_steps: u64, + overall_success_rate: f64, +} + +async fn get_metrics_overview( + State(state): State>, + Query(params): Query, +) -> Result, AppError> { + if state.chronicle.is_none() { + return Err(anyhow::anyhow!("chronicle feature not enabled").into()); + } + + let all_projects = list_projects(); + let projects: Vec<_> = if let Some(ref group) = params.group { + all_projects + .into_iter() + .filter(|p| p.group.as_deref() == Some(group.as_str())) + .collect() + } else { + all_projects + }; + + let now = time::OffsetDateTime::now_utc(); + let from_date = now - time::Duration::days(params.days as i64); + let to_str = now + .format(&time::format_description::well_known::Rfc3339) + .unwrap_or_default(); + let from_str = from_date + .format(&time::format_description::well_known::Rfc3339) + .unwrap_or_default(); + + let range = DateRange { + after: Some(from_str[..10].to_string()), + before: None, + }; + + let metrics = per_project_metrics(&projects, &range, params.days); + + let total_cost: f64 = metrics.iter().map(|m| m.cost.total_usd).sum(); + let total_events: usize = metrics.iter().map(|m| 
m.activity.events).sum(); + let total_commits: usize = metrics.iter().map(|m| m.activity.commits).sum(); + let total_steps: u64 = metrics.iter().map(|m| m.quality.total_steps).sum(); + let total_success: u64 = metrics + .iter() + .map(|m| (m.quality.success_rate * m.quality.total_steps as f64) as u64) + .sum(); + + let period = DashboardPeriod { + from: from_str[..10].to_string(), + to: to_str[..10].to_string(), + days: params.days, + }; + + Ok(Json(MetricsOverviewResponse { + period, + projects: metrics, + totals: MetricsTotals { + total_cost_usd: total_cost, + total_events, + total_commits, + total_steps, + overall_success_rate: if total_steps > 0 { + total_success as f64 / total_steps as f64 + } else { + 0.0 + }, + }, + })) +} + +// ── GET /api/metrics/trends ── + +fn default_trend_granularity() -> String { + "daily".to_string() +} + +#[derive(Deserialize)] +struct TrendsQuery { + #[serde(default = "default_overview_days")] + days: usize, + #[serde(default = "default_trend_granularity")] + granularity: String, + group: Option, +} + +#[derive(Serialize)] +struct TrendsResponse { + granularity: String, + data: Vec, +} + +#[derive(Serialize)] +struct TrendPoint { + date: String, + events: usize, + commits: usize, + cost_usd: f64, + execution_count: u64, + success_count: u64, + success_rate: f64, +} + +async fn get_metrics_trends( + State(state): State>, + Query(params): Query, +) -> Result, AppError> { + if state.chronicle.is_none() { + return Err(anyhow::anyhow!("chronicle feature not enabled").into()); + } + + let all_projects = list_projects(); + let projects: Vec<_> = if let Some(ref group) = params.group { + all_projects + .into_iter() + .filter(|p| p.group.as_deref() == Some(group.as_str())) + .collect() + } else { + all_projects + }; + + let now = time::OffsetDateTime::now_utc(); + let from_date = now - time::Duration::days(params.days as i64); + let from_str = from_date + .format(&time::format_description::well_known::Rfc3339) + .unwrap_or_default(); + + 
let range = DateRange { + after: Some(from_str[..10].to_string()), + before: None, + }; + + let r = rollup::compute_rollup(&projects, &range, "edda"); + + let data: Vec = match params.granularity.as_str() { + "weekly" => r + .weekly + .iter() + .map(|w| TrendPoint { + date: w.week_start.clone(), + events: w.events, + commits: w.commits, + cost_usd: w.cost_usd, + execution_count: w.execution_count, + success_count: w.success_count, + success_rate: if w.execution_count > 0 { + w.success_count as f64 / w.execution_count as f64 + } else { + 0.0 + }, + }) + .collect(), + "monthly" => r + .monthly + .iter() + .map(|m| TrendPoint { + date: m.month.clone(), + events: m.events, + commits: m.commits, + cost_usd: m.cost_usd, + execution_count: m.execution_count, + success_count: m.success_count, + success_rate: if m.execution_count > 0 { + m.success_count as f64 / m.execution_count as f64 + } else { + 0.0 + }, + }) + .collect(), + _ => r + .daily + .iter() + .map(|d| TrendPoint { + date: d.date.clone(), + events: d.events, + commits: d.commits, + cost_usd: d.cost_usd, + execution_count: d.execution_count, + success_count: d.success_count, + success_rate: if d.execution_count > 0 { + d.success_count as f64 / d.execution_count as f64 + } else { + 0.0 + }, + }) + .collect(), + }; + + Ok(Json(TrendsResponse { + granularity: params.granularity, + data, + })) +} + +/// Metrics-related routes (quality, controls, overview, trends). 
+pub(crate) fn routes() -> Router> { + Router::new() + .route("/api/metrics/quality", get(get_quality_metrics)) + .route("/api/metrics/overview", get(get_metrics_overview)) + .route("/api/metrics/trends", get(get_metrics_trends)) + .route("/api/controls/suggestions", get(get_controls_suggestions)) + .route("/api/controls/patches", get(get_controls_patches)) + .route( + "/api/controls/patches/{patch_id}/approve", + post(post_approve_controls_patch), + ) +} diff --git a/crates/edda-serve/src/api/mod.rs b/crates/edda-serve/src/api/mod.rs new file mode 100644 index 0000000..f0754bb --- /dev/null +++ b/crates/edda-serve/src/api/mod.rs @@ -0,0 +1,12 @@ +pub(crate) mod analytics; +pub(crate) mod auth; +pub(crate) mod briefs; +pub(crate) mod dashboard; +pub(crate) mod drafts; +pub(crate) mod events; +pub(crate) mod ingestion; +pub(crate) mod metrics; +pub(crate) mod policy; +pub(crate) mod snapshots; +pub(crate) mod stream; +pub(crate) mod telemetry; diff --git a/crates/edda-serve/src/api/policy.rs b/crates/edda-serve/src/api/policy.rs new file mode 100644 index 0000000..21c236f --- /dev/null +++ b/crates/edda-serve/src/api/policy.rs @@ -0,0 +1,296 @@ +use std::sync::Arc; + +use anyhow::Context; +use axum::extract::{Path as AxumPath, Query, State}; +use axum::routing::{get, post}; +use axum::{Json, Router}; +use serde::{Deserialize, Serialize}; + +use edda_core::policy; +use edda_ledger::Ledger; + +use crate::error::AppError; +use crate::state::AppState; + +// ── POST /api/scope/check ── + +#[derive(Deserialize)] +struct ScopeCheckBody { + project_id: String, + session_id: String, + files: Vec, +} + +#[derive(Serialize)] +struct ScopeCheckResult { + path: String, + allowed: bool, +} + +#[derive(Serialize)] +struct ScopeCheckResponse { + session_id: String, + label: String, + scope: Vec, + no_claim: bool, + all_allowed: bool, + results: Vec, +} + +// SECURITY: `project_id` is caller-supplied and not validated against any +// ACL. 
Acceptable because edda is a single-user local tool; revisit if +// multi-tenant isolation is ever required. +async fn post_scope_check( + Json(body): Json, +) -> Result, AppError> { + let board = edda_bridge_claude::peers::compute_board_state(&body.project_id); + let claim = board + .claims + .iter() + .find(|c| c.session_id == body.session_id); + + match claim { + None => { + // Permissive default: no claim means all files allowed + let results = body + .files + .iter() + .map(|f| ScopeCheckResult { + path: f.clone(), + allowed: true, + }) + .collect(); + Ok(Json(ScopeCheckResponse { + session_id: body.session_id, + label: String::new(), + scope: vec![], + no_claim: true, + all_allowed: true, + results, + })) + } + Some(claim) => { + // Build glob set from claim patterns + let mut builder = globset::GlobSetBuilder::new(); + for pattern in &claim.paths { + if let Ok(glob) = globset::GlobBuilder::new(pattern) + .literal_separator(false) + .build() + { + builder.add(glob); + } + } + let glob_set = builder + .build() + .map_err(|e| anyhow::anyhow!("invalid glob patterns: {}", e))?; + + let results: Vec = body + .files + .iter() + .map(|f| ScopeCheckResult { + path: f.clone(), + allowed: glob_set.is_match(f), + }) + .collect(); + + let all_allowed = results.iter().all(|r| r.allowed); + + Ok(Json(ScopeCheckResponse { + session_id: body.session_id, + label: claim.label.clone(), + scope: claim.paths.clone(), + no_claim: false, + all_allowed, + results, + })) + } + } +} + +// ── GET /api/scope/whitelist ── + +#[derive(Deserialize)] +struct WhitelistQuery { + project_id: String, + #[serde(default)] + session_id: Option, +} + +#[derive(Serialize)] +struct WhitelistClaim { + session_id: String, + label: String, + patterns: Vec, + ts: String, +} + +#[derive(Serialize)] +struct WhitelistResponse { + claims: Vec, +} + +// SECURITY: `project_id` is caller-supplied and not validated against any +// ACL. 
Acceptable because edda is a single-user local tool; revisit if +// multi-tenant isolation is ever required. +async fn get_scope_whitelist( + Query(query): Query, +) -> Result, AppError> { + let board = edda_bridge_claude::peers::compute_board_state(&query.project_id); + + let claims: Vec = board + .claims + .iter() + .filter(|c| { + query + .session_id + .as_ref() + .is_none_or(|sid| &c.session_id == sid) + }) + .map(|c| WhitelistClaim { + session_id: c.session_id.clone(), + label: c.label.clone(), + patterns: c.paths.clone(), + ts: c.ts.clone(), + }) + .collect(); + + Ok(Json(WhitelistResponse { claims })) +} + +// ── POST /api/authz/check ── + +async fn post_authz_check( + State(state): State>, + Json(body): Json, +) -> Result, AppError> { + let edda_dir = state.repo_root.join(".edda"); + let pol = policy::load_policy_from_dir(&edda_dir)?; + let actors = policy::load_actors_from_dir(&edda_dir)?; + let result = policy::evaluate_authz(&body, &pol, &actors); + Ok(Json(result)) +} + +// ── GET /api/tool-tier/:tool_name ── + +async fn get_tool_tier( + State(state): State>, + AxumPath(tool_name): AxumPath, +) -> Result, AppError> { + let edda_dir = state.repo_root.join(".edda"); + let config = edda_core::tool_tier::load_tool_tiers_from_dir(&edda_dir)?; + let result = edda_core::tool_tier::resolve_tool_tier(&config, &tool_name); + Ok(Json(result)) +} + +// ── POST /api/approval/check ── + +#[derive(Deserialize)] +struct ApprovalCheckRequest { + step: String, + #[serde(default)] + bundle_id: Option, + #[serde(default)] + risk_level: Option, + #[serde(default)] + files_changed: Option, + #[serde(default)] + tests_failed: Option, + #[serde(default)] + off_limits_touched: Option, +} + +async fn post_approval_check( + State(state): State>, + Json(body): Json, +) -> Result, AppError> { + let edda_dir = state.repo_root.join(".edda"); + let policy = edda_core::approval::load_approval_policy(&edda_dir)?; + + // Build ReviewBundle from request or from ledger + let bundle = if 
let Some(bundle_id) = &body.bundle_id { + let ledger = Ledger::open(&state.repo_root).context("POST /api/approval/check")?; + let Some(row) = ledger.get_bundle(bundle_id)? else { + return Err(AppError::NotFound(format!( + "Bundle '{}' not found", + bundle_id + ))); + }; + let Some(event) = ledger.get_event(&row.event_id)? else { + return Err(AppError::NotFound(format!( + "Event for bundle '{}' not found", + bundle_id + ))); + }; + serde_json::from_value::(event.payload)? + } else { + // Build a synthetic bundle from inline fields + let risk = body + .risk_level + .unwrap_or(edda_core::bundle::RiskLevel::Medium); + let file_count = body.files_changed.unwrap_or(0) as usize; + let failed = body.tests_failed.unwrap_or(0); + let files: Vec = (0..file_count) + .map(|i| edda_core::bundle::FileChange { + path: format!("file_{i}"), + added: 1, + deleted: 0, + }) + .collect(); + edda_core::bundle::ReviewBundle { + bundle_id: "inline".to_string(), + change_summary: edda_core::bundle::ChangeSummary { + files, + total_added: file_count as u32, + total_deleted: 0, + diff_ref: "inline".to_string(), + }, + test_results: edda_core::bundle::TestResults { + passed: 0, + failed, + ignored: 0, + total: failed, + failures: vec![], + command: "inline".to_string(), + }, + risk_assessment: edda_core::bundle::RiskAssessment { + level: risk, + factors: vec![], + }, + suggested_action: edda_core::bundle::SuggestedAction::Review, + suggested_reason: "inline check".to_string(), + } + }; + + let phase_state = edda_core::agent_phase::AgentPhaseState { + phase: edda_core::agent_phase::AgentPhase::Implement, + session_id: "api-check".to_string(), + label: None, + issue: None, + pr: None, + branch: None, + confidence: 1.0, + detected_at: String::new(), + signals: vec![], + }; + + let ctx = edda_core::approval::EvalContext { + bundle: &bundle, + phase: &phase_state, + off_limits_touched: body.off_limits_touched.unwrap_or(false), + consecutive_failures: 0, + current_time: 
Some(time::OffsetDateTime::now_utc()), + }; + + let decision = policy.evaluate(&body.step, &ctx); + Ok(Json(decision)) +} + +/// Policy-related routes (scope, authz, approval, tool-tier). +pub(crate) fn routes() -> Router> { + Router::new() + .route("/api/scope/check", post(post_scope_check)) + .route("/api/scope/whitelist", get(get_scope_whitelist)) + .route("/api/authz/check", post(post_authz_check)) + .route("/api/approval/check", post(post_approval_check)) + .route("/api/tool-tier/{tool_name}", get(get_tool_tier)) +} diff --git a/crates/edda-serve/src/api/snapshots.rs b/crates/edda-serve/src/api/snapshots.rs new file mode 100644 index 0000000..b1dbbe5 --- /dev/null +++ b/crates/edda-serve/src/api/snapshots.rs @@ -0,0 +1,343 @@ +use std::sync::Arc; + +use anyhow::Context; +use axum::extract::rejection::JsonRejection; +use axum::extract::{Path as AxumPath, Query, State}; +use axum::http::StatusCode; +use axum::response::IntoResponse; +use axum::routing::{get, post}; +use axum::{Json, Router}; +use serde::{Deserialize, Serialize}; + +use edda_core::event::new_snapshot_event; +use edda_ledger::lock::WorkspaceLock; +use edda_ledger::Ledger; + +use crate::error::AppError; +use crate::state::AppState; + +// ── POST /api/snapshot ── + +#[derive(Deserialize)] +struct SnapshotBody { + context: serde_json::Value, + result: serde_json::Value, + engine_version: String, + #[serde(default = "default_snapshot_schema")] + schema_version: String, + context_hash: String, + #[serde(default = "default_redaction_level")] + redaction_level: String, + village_id: Option, + cycle_id: Option, +} + +fn default_snapshot_schema() -> String { + "snapshot.v1".to_string() +} + +fn default_redaction_level() -> String { + "full".to_string() +} + +#[derive(Serialize)] +struct SnapshotResponse { + event_id: String, + context_hash: String, +} + +async fn post_snapshot( + State(state): State>, + body: Result, JsonRejection>, +) -> Result { + let Json(body) = body.map_err(|e| 
AppError::Validation(e.body_text()))?; + + if body.engine_version.is_empty() { + return Err(AppError::Validation( + "engine_version must not be empty".into(), + )); + } + if body.context_hash.is_empty() { + return Err(AppError::Validation( + "context_hash must not be empty".into(), + )); + } + + let ledger = state.open_ledger().context("POST /api/snapshot")?; + let _lock = WorkspaceLock::acquire(&ledger.paths)?; + + let branch = ledger.head_branch()?; + let parent_hash = ledger.last_event_hash()?; + + // Attempt blob offload for large payloads + let context_bytes = serde_json::to_vec(&body.context)?; + let result_bytes = serde_json::to_vec(&body.result)?; + + let threshold = edda_ledger::SNAPSHOT_BLOB_THRESHOLD; + let context_blob = edda_ledger::blob_put_if_large( + &ledger.paths, + &context_bytes, + edda_ledger::BlobClass::DecisionEvidence, + threshold, + ) + .map_err(|e| anyhow::anyhow!("writing context blob: {e}"))?; + let result_blob = edda_ledger::blob_put_if_large( + &ledger.paths, + &result_bytes, + edda_ledger::BlobClass::DecisionEvidence, + threshold, + ) + .map_err(|e| anyhow::anyhow!("writing result blob: {e}"))?; + + let has_blobs = context_blob.is_some() || result_blob.is_some(); + + // Build event payload: metadata + inline or blob refs + let mut payload = serde_json::json!({ + "engine_version": body.engine_version, + "schema_version": body.schema_version, + "context_hash": body.context_hash, + "redaction_level": body.redaction_level, + }); + if let Some(ref vid) = body.village_id { + payload["village_id"] = serde_json::Value::String(vid.clone()); + } + if let Some(ref cid) = body.cycle_id { + payload["cycle_id"] = serde_json::Value::String(cid.clone()); + } + + let mut blob_refs = Vec::new(); + if let Some(ref br) = context_blob { + payload["context_blob"] = serde_json::Value::String(br.clone()); + blob_refs.push(br.clone()); + } else { + payload["context_inline"] = body.context; + } + if let Some(ref br) = result_blob { + payload["result_blob"] = 
serde_json::Value::String(br.clone()); + blob_refs.push(br.clone()); + } else { + payload["result_inline"] = body.result; + } + + let event = new_snapshot_event(&branch, parent_hash.as_deref(), payload, blob_refs)?; + let event_id = event.event_id.clone(); + let created_at = event.ts.clone(); + + ledger.append_event(&event)?; + + // Insert into materialized view + ledger.insert_snapshot(&edda_ledger::DecideSnapshotRow { + event_id: event_id.clone(), + context_hash: body.context_hash.clone(), + engine_version: body.engine_version, + schema_version: body.schema_version, + redaction_level: body.redaction_level, + village_id: body.village_id, + cycle_id: body.cycle_id, + has_blobs, + created_at, + })?; + + Ok(( + StatusCode::CREATED, + Json(SnapshotResponse { + event_id, + context_hash: body.context_hash, + }), + )) +} + +// ── GET /api/snapshots ── + +#[derive(Deserialize)] +struct SnapshotsQuery { + village_id: Option, + engine_version: Option, + #[serde(default = "default_snapshot_limit")] + limit: usize, +} + +fn default_snapshot_limit() -> usize { + 20 +} + +async fn get_snapshots( + State(state): State>, + Query(query): Query, +) -> Result { + let ledger = state.open_ledger().context("GET /api/snapshots")?; + let rows = ledger.query_snapshots( + query.village_id.as_deref(), + query.engine_version.as_deref(), + query.limit, + )?; + + let mut snapshots = Vec::new(); + for row in &rows { + let snapshot = reconstruct_snapshot(&ledger, row)?; + snapshots.push(snapshot); + } + + Ok(Json(snapshots)) +} + +// ── GET /api/snapshots/:context_hash ── + +async fn get_snapshots_by_hash( + State(state): State>, + AxumPath(context_hash): AxumPath, +) -> Result { + let ledger = state.open_ledger().context("GET /api/snapshots/:hash")?; + let rows = ledger.snapshots_by_context_hash(&context_hash)?; + + if rows.is_empty() { + return Err(AppError::NotFound(format!( + "no snapshots found for context_hash: {context_hash}" + ))); + } + + let mut snapshots = Vec::new(); + for row in 
&rows { + let snapshot = reconstruct_snapshot(&ledger, row)?; + snapshots.push(snapshot); + } + + Ok(Json(snapshots)) +} + +// ── GET /api/villages/{village_id}/stats ── + +#[derive(Deserialize)] +struct VillageStatsQuery { + /// ISO 8601 lower bound (inclusive). + after: Option, + /// ISO 8601 upper bound (inclusive). + before: Option, +} + +async fn get_village_stats( + State(state): State>, + AxumPath(village_id): AxumPath, + Query(params): Query, +) -> Result, AppError> { + if let Some(ref after) = params.after { + crate::helpers::validate_iso8601(after).map_err(AppError::Validation)?; + } + if let Some(ref before) = params.before { + crate::helpers::validate_iso8601(before).map_err(AppError::Validation)?; + } + + let ledger = state.open_ledger().context("GET /api/villages/:id/stats")?; + let stats = ledger.village_stats( + &village_id, + params.after.as_deref(), + params.before.as_deref(), + )?; + Ok(Json(stats)) +} + +// ── GET /api/patterns ── + +#[derive(Deserialize)] +struct PatternsQuery { + village_id: Option, + /// Number of days to look back (default 7, max 90). + #[serde(default)] + lookback_days: Option, + /// Minimum occurrences to qualify as a pattern (default 3). 
+ #[serde(default)] + min_occurrences: Option, +} + +async fn get_patterns( + State(state): State>, + Query(params): Query, +) -> Result, AppError> { + let village_id = params + .village_id + .as_deref() + .filter(|s| !s.is_empty()) + .ok_or_else(|| AppError::Validation("village_id query parameter is required".into()))?; + + let lookback_days = params.lookback_days.unwrap_or(7).min(90); + let min_occurrences = params.min_occurrences.unwrap_or(3).max(2); + + let now = time::OffsetDateTime::now_utc(); + let after_date = now - time::Duration::days(i64::from(lookback_days)); + let after_str = after_date + .format(&time::format_description::well_known::Rfc3339) + .unwrap_or_default(); + + let ledger = state.open_ledger().context("GET /api/patterns")?; + let patterns = ledger.detect_village_patterns(village_id, &after_str, min_occurrences)?; + let total = patterns.len(); + + Ok(Json(edda_ledger::sqlite_store::PatternDetectionResult { + village_id: village_id.to_string(), + lookback_days, + after: after_str, + total_patterns: total, + patterns, + })) +} + +/// Reconstruct a full snapshot JSON from a materialized view row + event payload. +fn reconstruct_snapshot( + ledger: &Ledger, + row: &edda_ledger::DecideSnapshotRow, +) -> Result { + let event = ledger + .get_event(&row.event_id)? + .ok_or_else(|| AppError::Internal(anyhow::anyhow!("event {} not found", row.event_id)))?; + + let payload = &event.payload; + + // Resolve context: inline or blob + let context = if let Some(inline) = payload.get("context_inline") { + inline.clone() + } else if let Some(blob_ref) = payload.get("context_blob").and_then(|v| v.as_str()) { + let path = + edda_ledger::blob_get_path(&ledger.paths, blob_ref).map_err(AppError::Internal)?; + let bytes = std::fs::read(&path) + .map_err(|e| AppError::Internal(anyhow::anyhow!("read context blob: {e}")))?; + serde_json::from_slice(&bytes)? 
+ } else { + serde_json::Value::Null + }; + + // Resolve result: inline or blob + let result = if let Some(inline) = payload.get("result_inline") { + inline.clone() + } else if let Some(blob_ref) = payload.get("result_blob").and_then(|v| v.as_str()) { + let path = + edda_ledger::blob_get_path(&ledger.paths, blob_ref).map_err(AppError::Internal)?; + let bytes = std::fs::read(&path) + .map_err(|e| AppError::Internal(anyhow::anyhow!("read result blob: {e}")))?; + serde_json::from_slice(&bytes)? + } else { + serde_json::Value::Null + }; + + Ok(serde_json::json!({ + "event_id": row.event_id, + "context_hash": row.context_hash, + "engine_version": row.engine_version, + "schema_version": row.schema_version, + "redaction_level": row.redaction_level, + "village_id": row.village_id, + "cycle_id": row.cycle_id, + "context": context, + "result": result, + "created_at": row.created_at, + })) +} + +/// Snapshot and village-related routes. +pub(crate) fn routes() -> Router> { + Router::new() + .route("/api/snapshot", post(post_snapshot)) + .route("/api/snapshots", get(get_snapshots)) + .route("/api/snapshots/{context_hash}", get(get_snapshots_by_hash)) + .route("/api/villages/{village_id}/stats", get(get_village_stats)) + .route("/api/patterns", get(get_patterns)) +} diff --git a/crates/edda-serve/src/api/stream.rs b/crates/edda-serve/src/api/stream.rs new file mode 100644 index 0000000..a00e979 --- /dev/null +++ b/crates/edda-serve/src/api/stream.rs @@ -0,0 +1,144 @@ +use std::convert::Infallible; +use std::sync::Arc; +use std::time::Duration; + +use anyhow::Context; +use axum::extract::{Query, State}; +use axum::http::HeaderMap; +use axum::response::sse::{Event as SseEvent, KeepAlive}; +use axum::response::Sse; +use axum::routing::get; +use axum::Router; +use serde::Deserialize; + +use crate::error::AppError; +use crate::state::AppState; + +// ── SSE Event Stream ── + +/// Query parameters for the SSE event stream endpoint. 
+#[derive(Deserialize)]
+struct StreamParams {
+    /// Comma-separated event types to subscribe to (e.g. "decision,phase_change").
+    /// If omitted, all event types are streamed.
+    types: Option<String>,
+    /// Resume from this event_id (alternative to `Last-Event-ID` header).
+    since: Option<String>,
+}
+
+/// Map a ledger event to the SSE event name sent to clients.
+///
+/// Decisions are stored as `note` events with a `decision` key in the payload,
+/// so we check the payload in addition to the `event_type` field.
+fn sse_event_name(event: &edda_core::Event) -> &'static str {
+    match event.event_type.as_str() {
+        "agent_phase_change" => "phase_change",
+        "approval_request" => "approval_pending",
+        "note" if event.payload.get("decision").is_some() => "decision",
+        _ => "new_event",
+    }
+}
+
+/// `GET /api/events/stream` — Server-Sent Events endpoint.
+///
+/// Streams new ledger events in real time using a poll-based approach
+/// (queries SQLite rowid cursor every 2 seconds).
+///
+/// Supports:
+/// - `?types=decision,phase_change` — filter by SSE event type
+/// - `?since=evt_xxx` or `Last-Event-ID` header — resume after disconnect
+/// - 30-second keep-alive heartbeat
+async fn get_event_stream(
+    State(state): State<Arc<AppState>>,
+    Query(params): Query<StreamParams>,
+    headers: HeaderMap,
+) -> Result<Sse<impl futures_core::Stream<Item = Result<SseEvent, Infallible>>>, AppError> {
+    // Determine the resume cursor: query param takes precedence over header.
+    let since = params.since.or_else(|| {
+        headers
+            .get("Last-Event-ID")
+            .and_then(|v| v.to_str().ok())
+            .map(String::from)
+    });
+
+    // Parse type filter into a set for O(1) lookups.
+    let type_filter: Option<std::collections::HashSet<String>> = params.types.map(|t| {
+        t.split(',')
+            .map(|s| s.trim().to_string())
+            .filter(|s| !s.is_empty())
+            .collect()
+    });
+
+    // Resolve the initial cursor (rowid) from `since` event_id.
+    // An unknown event_id falls back to rowid 0, i.e. the full ledger is replayed.
+    let mut cursor: i64 = if let Some(ref event_id) = since {
+        let ledger = state.open_ledger().context("GET /api/events/stream")?;
+        ledger.rowid_for_event_id(event_id)?.unwrap_or(0)
+    } else {
+        0
+    };
+
+    let repo_root = state.repo_root.clone();
+
+    let stream = async_stream::stream! {
+        let mut interval = tokio::time::interval(Duration::from_secs(2));
+        loop {
+            interval.tick().await;
+
+            // Re-open the ledger on every tick so writes from other processes
+            // become visible; open/query errors are treated as transient and
+            // simply retried on the next tick.
+            let ledger = match edda_ledger::Ledger::open(&repo_root) {
+                Ok(l) => l,
+                Err(_) => continue,
+            };
+
+            let new_events = match ledger.events_after_rowid(cursor) {
+                Ok(evts) => evts,
+                Err(_) => continue,
+            };
+
+            if new_events.is_empty() {
+                continue;
+            }
+
+            // Update cursor to the latest rowid.
+            // NOTE(review): the cursor advances past filtered-out events too —
+            // they are skipped permanently, not retried on the next tick.
+            if let Some((last_rowid, _)) = new_events.last() {
+                cursor = *last_rowid;
+            }
+
+            for (_rowid, event) in new_events {
+                let sse_name = sse_event_name(&event);
+
+                // Apply type filter if specified.
+                if let Some(ref filters) = type_filter {
+                    if !filters.iter().any(|f| f == sse_name) {
+                        continue;
+                    }
+                }
+
+                let event_id = event.event_id.clone();
+                let data = serde_json::json!({
+                    "event_type": sse_name,
+                    "data": serde_json::to_value(&event).unwrap_or_default(),
+                    "ts": &event.ts,
+                });
+
+                // A serialization failure degrades to an SSE comment rather
+                // than tearing down the long-lived connection.
+                let sse_event = SseEvent::default()
+                    .event(sse_name)
+                    .id(event_id)
+                    .json_data(data)
+                    .unwrap_or_else(|_| SseEvent::default().comment("serialization error"));
+
+                yield Ok::<_, Infallible>(sse_event);
+            }
+        }
+    };
+
+    Ok(Sse::new(stream).keep_alive(
+        KeepAlive::new()
+            .interval(Duration::from_secs(30))
+            .text("ping"),
+    ))
+}
+
+/// SSE event stream routes.
+pub(crate) fn routes() -> Router<Arc<AppState>> {
+    Router::new().route("/api/events/stream", get(get_event_stream))
+}
diff --git a/crates/edda-serve/src/api/telemetry.rs b/crates/edda-serve/src/api/telemetry.rs
new file mode 100644
index 0000000..60c52f1
--- /dev/null
+++ b/crates/edda-serve/src/api/telemetry.rs
@@ -0,0 +1,335 @@
+use std::sync::Arc;
+
+use anyhow::Context;
+use axum::extract::rejection::JsonRejection;
+use axum::extract::{Query, State};
+use axum::http::StatusCode;
+use axum::response::{IntoResponse, Response};
+use axum::routing::{get, post};
+use axum::{Json, Router};
+use serde::{Deserialize, Serialize};
+
+use edda_core::event::new_telemetry_event;
+use edda_ledger::lock::WorkspaceLock;
+
+use crate::error::AppError;
+use crate::state::AppState;
+
+// ── POST /api/telemetry ──
+
+/// Request body for `POST /api/telemetry` — one agent-cycle telemetry report.
+#[derive(Deserialize)]
+struct TelemetryBody {
+    cycle_id: String,
+    source: String,
+    // assumes ISO 8601 / RFC 3339 — TODO confirm against new_telemetry_event
+    started_at: String,
+    total_duration_ms: u64,
+    #[serde(default)]
+    operations: Vec<TelemetryOp>,
+    #[serde(default)]
+    cost: Option<TelemetryCost>,
+    #[serde(default)]
+    tags: Vec<String>,
+    #[serde(default)]
+    metadata: Option<serde_json::Value>,
+}
+
+/// A single timed operation within a cycle.
+#[derive(Deserialize, Serialize)]
+struct TelemetryOp {
+    name: String,
+    duration_ms: u64,
+    #[serde(default)]
+    token_usage: Option<TelemetryTokenUsage>,
+    #[serde(default)]
+    status: Option<String>,
+}
+
+#[derive(Deserialize, Serialize)]
+struct TelemetryTokenUsage {
+    input_tokens: u64,
+    output_tokens: u64,
+}
+
+#[derive(Deserialize, Serialize)]
+struct TelemetryCost {
+    total_usd: f64,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    breakdown: Option<std::collections::HashMap<String, f64>>,
+}
+
+#[derive(Serialize)]
+struct TelemetryResponse {
+    event_id: String,
+    status: String,
+}
+
+/// `POST /api/telemetry` — append a `cycle_telemetry` event to the ledger.
+///
+/// Returns 201 with status "created" on first insert; a duplicate submission
+/// (as detected by `append_event_idempotent`) returns 200 with "duplicate".
+async fn post_telemetry(
+    State(state): State<Arc<AppState>>,
+    body: Result<Json<TelemetryBody>, JsonRejection>,
+) -> Result<Response, AppError> {
+    // A malformed JSON body becomes a 400 rather than axum's default rejection.
+    let Json(body) = body.map_err(|e| AppError::Validation(e.to_string()))?;
+
+    // Serialize full body as payload
+    let payload = serde_json::json!({
+        "cycle_id": body.cycle_id,
+        "source": body.source,
+        "started_at": body.started_at,
+        "total_duration_ms": body.total_duration_ms,
+        "operations": body.operations,
+        "cost": body.cost,
+        "tags": body.tags,
+        "metadata": body.metadata,
+    });
+
+    // Acquire the workspace lock before reading the chain head and appending;
+    // the guard is held (via `_lock`) until this handler returns.
+    let ledger = state.open_ledger().context("POST /api/telemetry")?;
+    let _lock = WorkspaceLock::acquire(&ledger.paths)?;
+    let branch = ledger.head_branch()?;
+    let parent_hash = ledger.last_event_hash()?;
+
+    let event = new_telemetry_event(
+        &branch,
+        parent_hash.as_deref(),
+        &body.cycle_id,
+        &body.started_at,
+        payload,
+    )?;
+
+    let inserted = ledger.append_event_idempotent(&event)?;
+
+    let response = TelemetryResponse {
+        event_id: event.event_id,
+        status: if inserted {
+            "created".to_string()
+        } else {
+            "duplicate".to_string()
+        },
+    };
+
+    let status = if inserted {
+        StatusCode::CREATED
+    } else {
+        StatusCode::OK
+    };
+
+    Ok((status, Json(response)).into_response())
+}
+
+// ── GET /api/telemetry ──
+
+/// Query parameters for `GET /api/telemetry`.
+#[derive(Deserialize)]
+struct TelemetryQuery {
+    #[serde(default)]
+    after: Option<String>,
+    #[serde(default)]
+    before: Option<String>,
+    #[serde(default)]
+    source: Option<String>,
+    #[serde(default)]
+    limit: Option<usize>,
+}
+
+/// `GET /api/telemetry` — list `cycle_telemetry` payloads.
+///
+/// `after`/`before` are applied in the ledger query; `source` is applied as a
+/// post-filter below, so a page may return fewer than `limit` items.
+async fn get_telemetry(
+    State(state): State<Arc<AppState>>,
+    Query(q): Query<TelemetryQuery>,
+) -> Result<Response, AppError> {
+    let ledger = state.open_ledger().context("GET /api/telemetry")?;
+    let branch = ledger.head_branch()?;
+    let limit = q.limit.unwrap_or(100);
+
+    let events = ledger.iter_events_filtered(
+        &branch,
+        Some("cycle_telemetry"),
+        None,
+        q.after.as_deref(),
+        q.before.as_deref(),
+        limit,
+    )?;
+
+    let mut payloads: Vec<serde_json::Value> = events
+        .into_iter()
+        .map(|e| {
+            let mut p = e.payload;
+            // Inject event_id for cross-reference
+            if let Some(obj) = p.as_object_mut() {
+                obj.insert("event_id".to_string(), serde_json::json!(e.event_id));
+            }
+            p
+        })
+        .collect();
+
+    // Post-filter by source if specified
+    if let Some(ref source) = q.source {
+        payloads.retain(|p| {
+            p.get("source")
+                .and_then(|v| v.as_str())
+                .is_some_and(|s| s == source)
+        });
+    }
+
+    Ok(Json(payloads).into_response())
+}
+
+// ── GET /api/telemetry/stats ──
+
+#[derive(Deserialize)]
+struct TelemetryStatsQuery {
+    /// Look-back window in days (default 7).
+    #[serde(default)]
+    days: Option<u32>,
+    /// Restrict stats to a single telemetry source (post-filter).
+    #[serde(default)]
+    source: Option<String>,
+}
+
+/// `GET /api/telemetry/stats` — aggregate stats over the last `days` days.
+async fn get_telemetry_stats(
+    State(state): State<Arc<AppState>>,
+    Query(q): Query<TelemetryStatsQuery>,
+) -> Result<Response, AppError> {
+    let ledger = state.open_ledger().context("GET /api/telemetry/stats")?;
+    let branch = ledger.head_branch()?;
+    let days = q.days.unwrap_or(7);
+
+    // Compute "after" date
+    let now = time::OffsetDateTime::now_utc();
+    let after_date = now - time::Duration::days(i64::from(days));
+    let after_str = after_date
+        .format(&time::format_description::well_known::Rfc3339)
+        .unwrap_or_default();
+
+    // NOTE(review): hard cap of 10_000 events — windows containing more
+    // cycles are silently truncated.
+    let events = ledger.iter_events_filtered(
+        &branch,
+        Some("cycle_telemetry"),
+        None,
+        Some(&after_str),
+        None,
+        10_000,
+    )?;
+
+    let mut payloads: Vec<serde_json::Value> = events.into_iter().map(|e| e.payload).collect();
+
+    // Post-filter by source if specified
+    if let Some(ref source) = q.source {
+        payloads.retain(|p| {
+            p.get("source")
+                .and_then(|v| v.as_str())
+                .is_some_and(|s| s == source)
+        });
+    }
+
+    let stats = compute_telemetry_stats(&payloads);
+    Ok(Json(stats).into_response())
+}
+
+/// Compute telemetry statistics from a set of cycle_telemetry payloads.
+///
+/// Returns a JSON object with cycle counts, avg/p95 duration, total cost,
+/// the 5 slowest operations by average duration, and the per-op error rate.
+/// Payload fields that are missing or mistyped are skipped, not errors.
+fn compute_telemetry_stats(payloads: &[serde_json::Value]) -> serde_json::Value {
+    let cycle_count = payloads.len();
+    if cycle_count == 0 {
+        // Zeroed report with the same shape as the non-empty case.
+        return serde_json::json!({
+            "cycle_count": 0,
+            "avg_duration_ms": 0.0,
+            "p95_duration_ms": 0.0,
+            "total_cost_usd": 0.0,
+            "slowest_operations": [],
+            "error_rate": 0.0,
+        });
+    }
+
+    // Collect durations
+    let mut durations: Vec<f64> = payloads
+        .iter()
+        .filter_map(|p| p.get("total_duration_ms").and_then(|v| v.as_f64()))
+        .collect();
+    durations.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
+
+    let avg_duration_ms = if durations.is_empty() {
+        0.0
+    } else {
+        durations.iter().sum::<f64>() / durations.len() as f64
+    };
+
+    let p95_duration_ms = if durations.is_empty() {
+        0.0
+    } else {
+        // NOTE(review): nearest-rank p95 is conventionally `ceil(0.95 * n) - 1`
+        // as a 0-based index; using `ceil(...)` directly selects one rank
+        // higher (e.g. the maximum for n = 20). Confirm whether this upward
+        // bias is intended.
+        let idx = ((durations.len() as f64) * 0.95).ceil() as usize;
+        durations[idx.min(durations.len() - 1)]
+    };
+
+    // Total cost
+    let total_cost_usd: f64 = payloads
+        .iter()
+        .filter_map(|p| {
+            p.get("cost")
+                .and_then(|c| c.get("total_usd"))
+                .and_then(|v| v.as_f64())
+        })
+        .sum();
+
+    // Per-operation stats
+    let mut op_stats: std::collections::HashMap<String, (f64, u64, usize, usize)> =
+        std::collections::HashMap::new(); // (sum_dur, max_dur, count, error_count)
+
+    let mut total_ops = 0usize;
+    let mut total_errors = 0usize;
+
+    for payload in payloads {
+        if let Some(ops) = payload.get("operations").and_then(|v| v.as_array()) {
+            for op in ops {
+                let name = op.get("name").and_then(|v| v.as_str()).unwrap_or("unknown");
+                let dur = op.get("duration_ms").and_then(|v| v.as_u64()).unwrap_or(0);
+                // A missing status is treated as success.
+                let status = op.get("status").and_then(|v| v.as_str()).unwrap_or("ok");
+
+                let entry = op_stats.entry(name.to_string()).or_insert((0.0, 0, 0, 0));
+                entry.0 += dur as f64;
+                if dur > entry.1 {
+                    entry.1 = dur;
+                }
+                entry.2 += 1;
+                total_ops += 1;
+                if status == "error" {
+                    entry.3 += 1;
+                    total_errors += 1;
+                }
+            }
+        }
+    }
+
+    // Build slowest operations (top 5 by avg duration)
+    let mut op_list: Vec<serde_json::Value> = op_stats
+        .iter()
+        .map(|(name, (sum, max, count, _))| {
+            serde_json::json!({
+                "name": name,
+                "avg_duration_ms": sum / *count as f64,
+                "max_duration_ms": max,
+                "count": count,
+            })
+        })
+        .collect();
+    op_list.sort_by(|a, b| {
+        let a_avg = a["avg_duration_ms"].as_f64().unwrap_or(0.0);
+        let b_avg = b["avg_duration_ms"].as_f64().unwrap_or(0.0);
+        // Descending by average duration.
+        b_avg
+            .partial_cmp(&a_avg)
+            .unwrap_or(std::cmp::Ordering::Equal)
+    });
+    op_list.truncate(5);
+
+    let error_rate = if total_ops > 0 {
+        total_errors as f64 / total_ops as f64
+    } else {
+        0.0
+    };
+
+    serde_json::json!({
+        "cycle_count": cycle_count,
+        "avg_duration_ms": avg_duration_ms,
+        "p95_duration_ms": p95_duration_ms,
+        "total_cost_usd": total_cost_usd,
+        "slowest_operations": op_list,
+        "error_rate": error_rate,
+    })
+}
+
+/// Telemetry routes.
+pub(crate) fn routes() -> Router<Arc<AppState>> {
+    Router::new()
+        .route("/api/telemetry", post(post_telemetry).get(get_telemetry))
+        .route("/api/telemetry/stats", get(get_telemetry_stats))
+}
diff --git a/crates/edda-serve/src/error.rs b/crates/edda-serve/src/error.rs
new file mode 100644
index 0000000..6401664
--- /dev/null
+++ b/crates/edda-serve/src/error.rs
@@ -0,0 +1,64 @@
+use axum::http::StatusCode;
+use axum::response::{IntoResponse, Response};
+use axum::Json;
+
+// ── Error Handling ──
+
+/// Application-level error, mapped to an HTTP status + JSON error body by
+/// the `IntoResponse` impl below.
+#[derive(Debug, thiserror::Error)]
+pub(crate) enum AppError {
+    #[error("{0}")]
+    Validation(String),
+
+    #[error("{0}")]
+    NotFound(String),
+
+    #[error("{0}")]
+    Conflict(String),
+
+    #[error("{0}")]
+    Unauthorized(String),
+
+    #[error("{0}")]
+    Internal(#[from] anyhow::Error),
+}
+
+impl From<serde_json::Error> for AppError {
+    fn from(err: serde_json::Error) -> Self {
+        Self::Internal(err.into())
+    }
+}
+
+impl From<serde_yaml::Error> for AppError {
+    fn from(err: serde_yaml::Error) -> Self {
+        Self::Internal(err.into())
+    }
+}
+
+impl From<globset::Error> for AppError {
+    fn from(err: globset::Error) -> Self {
+        Self::Internal(err.into())
+    }
+}
+
+impl From<std::io::Error> for AppError {
+    fn from(err: std::io::Error) -> Self {
+        Self::Internal(err.into())
+    }
+}
+
+impl IntoResponse for AppError {
+    fn into_response(self) -> Response {
+        let (status, code) = match &self {
+            AppError::Validation(_) => (StatusCode::BAD_REQUEST, "VALIDATION_ERROR"),
+            AppError::NotFound(_) => (StatusCode::NOT_FOUND, "NOT_FOUND"),
+            AppError::Conflict(_) => (StatusCode::CONFLICT, "CONFLICT"),
+            AppError::Unauthorized(_) => (StatusCode::UNAUTHORIZED, "UNAUTHORIZED"),
+            AppError::Internal(_) => (StatusCode::INTERNAL_SERVER_ERROR, "INTERNAL_ERROR"),
+        };
+        let body = serde_json::json!({
+            "error": self.to_string(),
+            "code": code,
+        });
+        (status, Json(body)).into_response()
+    }
+}
diff --git a/crates/edda-serve/src/helpers.rs b/crates/edda-serve/src/helpers.rs
new file mode 100644
index 0000000..587bfdf
--- /dev/null
+++ b/crates/edda-serve/src/helpers.rs
@@ -0,0 +1,12 @@
+/// Validate that a string looks like a valid ISO 8601 / RFC 3339 timestamp.
+pub(crate) fn validate_iso8601(s: &str) -> Result<(), String> {
+    time::OffsetDateTime::parse(s, &time::format_description::well_known::Rfc3339)
+        .map(|_| ())
+        .map_err(|_| format!("invalid ISO 8601 timestamp: {s}"))
+}
+
+/// Current UTC time formatted as an RFC 3339 string.
+pub(crate) fn time_now_rfc3339() -> String {
+    time::OffsetDateTime::now_utc()
+        .format(&time::format_description::well_known::Rfc3339)
+        .expect("RFC3339 formatting should not fail")
+}
diff --git a/crates/edda-serve/src/lib.rs b/crates/edda-serve/src/lib.rs
index 0d4eb58..2730e83 100644
--- a/crates/edda-serve/src/lib.rs
+++ b/crates/edda-serve/src/lib.rs
@@ -1,133 +1,35 @@
-use anyhow::Context;
+mod api;
+mod error;
+mod helpers;
+mod middleware;
+mod state;
+
+pub use state::ServeConfig;
+pub(crate) use state::{AppState, ChronicleContext};
+
 use std::collections::HashMap;
-use std::convert::Infallible;
 use std::net::SocketAddr;
-use std::path::{Path, PathBuf};
+use std::path::Path;
 use std::sync::{Arc, Mutex};
-use std::time::Duration;
-use axum::extract::rejection::JsonRejection;
-use axum::extract::{ConnectInfo, Path as AxumPath, Query, State};
-use axum::http::{HeaderMap, Request,
StatusCode}; -use axum::middleware::{self, Next}; -use axum::response::sse::{Event as SseEvent, KeepAlive}; -use axum::response::{IntoResponse, Response, Sse}; -use axum::routing::{get, post}; -use axum::{Json, Router}; -use edda_ledger::device_token::{generate_device_token, hash_token}; -use serde::{Deserialize, Serialize}; +use axum::middleware as axum_mw; +use axum::Router; use tower_http::cors::{AllowOrigin, CorsLayer}; -use edda_aggregate::aggregate::{ - aggregate_decisions, aggregate_overview, per_project_metrics, DateRange, ProjectMetrics, -}; -use edda_aggregate::controls::evaluate_controls_rules; -use edda_aggregate::graph::build_dependency_graph; -use edda_aggregate::quality::{model_quality_from_events, QualityReport}; -use edda_aggregate::risk::{compute_decision_risks, DecisionInput, DecisionRisk}; -use edda_aggregate::rollup; -use edda_core::agent_phase::{mobile_context_summary, AgentPhaseState}; -use edda_core::event::{ - finalize_event, new_approval_event, new_decision_event, new_execution_event, new_note_event, - new_snapshot_event, new_telemetry_event, ApprovalEventParams, -}; -use edda_core::policy::{self, ActorKind}; -use edda_core::types::{rel, DecisionPayload, Provenance}; -use edda_derive::{rebuild_branch, render_context, DeriveOptions}; -use edda_ledger::lock::WorkspaceLock; -use edda_ledger::Ledger; -use edda_store::registry::list_projects; - -// ── Config ── - -pub struct ServeConfig { - pub bind: String, - pub port: u16, -} - -// ── App State ── - -struct AppState { - repo_root: PathBuf, - chronicle: Option, - pending_pairings: Mutex>, -} - -struct PairingRequest { - device_name: String, - expires_at: std::time::Instant, -} - -struct ChronicleContext { - _store_root: PathBuf, -} - -impl AppState { - fn open_ledger(&self) -> anyhow::Result { - Ledger::open(&self.repo_root) - } -} - -// ── Error Handling ── - -#[derive(Debug, thiserror::Error)] -enum AppError { - #[error("{0}")] - Validation(String), - - #[error("{0}")] - NotFound(String), - 
- #[error("{0}")] - Conflict(String), - - #[error("{0}")] - Unauthorized(String), - - #[error("{0}")] - Internal(#[from] anyhow::Error), -} - -impl From for AppError { - fn from(err: serde_json::Error) -> Self { - Self::Internal(err.into()) - } -} - -impl From for AppError { - fn from(err: serde_yaml::Error) -> Self { - Self::Internal(err.into()) - } -} - -impl From for AppError { - fn from(err: globset::Error) -> Self { - Self::Internal(err.into()) - } -} - -impl From for AppError { - fn from(err: std::io::Error) -> Self { - Self::Internal(err.into()) - } -} - -impl IntoResponse for AppError { - fn into_response(self) -> Response { - let (status, code) = match &self { - AppError::Validation(_) => (StatusCode::BAD_REQUEST, "VALIDATION_ERROR"), - AppError::NotFound(_) => (StatusCode::NOT_FOUND, "NOT_FOUND"), - AppError::Conflict(_) => (StatusCode::CONFLICT, "CONFLICT"), - AppError::Unauthorized(_) => (StatusCode::UNAUTHORIZED, "UNAUTHORIZED"), - AppError::Internal(_) => (StatusCode::INTERNAL_SERVER_ERROR, "INTERNAL_ERROR"), - }; - let body = serde_json::json!({ - "error": self.to_string(), - "code": code, - }); - (status, Json(body)).into_response() - } -} +#[cfg(test)] +use crate::error::AppError; +#[cfg(test)] +use anyhow::Context; +#[cfg(test)] +use axum::extract::rejection::JsonRejection; +#[cfg(test)] +use axum::extract::State; +#[cfg(test)] +use axum::Json; +#[cfg(test)] +use serde::{Deserialize, Serialize}; +#[cfg(test)] +use std::path::PathBuf; // ── Entrypoint ── @@ -153,3324 +55,93 @@ pub async fn serve(repo_root: &Path, config: ServeConfig) -> anyhow::Result<()> }); // Public routes (no auth required) - let public_routes = Router::new() - .route("/api/health", get(health)) - .route("/pair", get(complete_pairing)); + let public_routes = api::auth::public_routes().merge(api::events::public_routes()); // Protected routes (auth middleware applied) - let protected_routes = Router::new() - .route("/api/status", get(get_status)) - .route("/api/context", 
get(get_context)) - .route("/api/decisions", get(get_decisions)) - .route("/api/decisions/batch", post(post_decisions_batch)) - .route( - "/api/decisions/{event_id}/outcomes", - get(get_decision_outcomes), - ) - .route("/api/decisions/{event_id}/chain", get(get_decision_chain)) - .route("/api/log", get(get_log)) - .route("/api/drafts", get(get_drafts)) - .route("/api/drafts/{id}/approve", post(post_draft_approve)) - .route("/api/drafts/{id}/deny", post(post_draft_deny)) - .route("/api/note", post(post_note)) - .route("/api/decide", post(post_decide)) - .route("/api/events/karvi", post(post_karvi_event)) - .route("/api/telemetry", post(post_telemetry).get(get_telemetry)) - .route("/api/telemetry/stats", get(get_telemetry_stats)) - .route("/api/scope/check", post(post_scope_check)) - .route("/api/scope/whitelist", get(get_scope_whitelist)) - .route("/api/authz/check", post(post_authz_check)) - .route("/api/approval/check", post(post_approval_check)) - .route("/api/tool-tier/{tool_name}", get(get_tool_tier)) - .route("/api/recap", get(get_recap)) - .route("/api/recap/cached", get(get_recap_cached)) - .route("/api/overview", get(get_overview)) - .route("/api/projects", get(get_projects)) - .route("/api/metrics/quality", get(get_quality_metrics)) - .route("/api/metrics/overview", get(get_metrics_overview)) - .route("/api/metrics/trends", get(get_metrics_trends)) - .route("/api/dashboard", get(get_dashboard)) - .route("/dashboard", get(serve_dashboard)) - .route("/api/actors", get(get_actors)) - .route("/api/actors/{name}", get(get_actor)) - .route("/api/briefs", get(get_briefs)) - .route("/api/briefs/{task_id}", get(get_brief)) - .route("/api/events/stream", get(get_event_stream)) - .route("/api/controls/suggestions", get(get_controls_suggestions)) - .route("/api/controls/patches", get(get_controls_patches)) - .route( - "/api/controls/patches/{patch_id}/approve", - post(post_approve_controls_patch), - ) - .route("/api/snapshot", post(post_snapshot)) - 
.route("/api/snapshots", get(get_snapshots)) - .route("/api/snapshots/{context_hash}", get(get_snapshots_by_hash)) - .route("/api/villages/{village_id}/stats", get(get_village_stats)) - .route("/api/patterns", get(get_patterns)) - .route("/api/ingestion/evaluate", post(post_ingestion_evaluate)) - .route( - "/api/ingestion/records", - post(post_ingestion_record).get(get_ingestion_records), - ) - .route("/api/ingestion/suggestions", get(get_ingestion_suggestions)) - .route( - "/api/ingestion/suggestions/{id}/accept", - post(post_suggestion_accept), - ) - .route( - "/api/ingestion/suggestions/{id}/reject", - post(post_suggestion_reject), - ) - .route("/api/pair/new", post(create_pairing)) - .route("/api/pair/list", get(list_paired_devices)) - .route("/api/pair/revoke", post(revoke_device)) - .route("/api/pair/revoke-all", post(revoke_all_devices)) - .layer(middleware::from_fn_with_state( + let protected_routes = api::events::protected_routes() + .merge(api::drafts::routes()) + .merge(api::telemetry::routes()) + .merge(api::snapshots::routes()) + .merge(api::analytics::routes()) + .merge(api::metrics::routes()) + .merge(api::dashboard::routes()) + .merge(api::policy::routes()) + .merge(api::briefs::routes()) + .merge(api::stream::routes()) + .merge(api::ingestion::routes()) + .merge(api::auth::protected_routes()) + .layer(axum_mw::from_fn_with_state( state.clone(), - auth_middleware, - )); - - // SECURITY: restrict CORS to localhost origins only. edda is a local - // development tool; if remote access is needed, consider adding an - // explicit --cors-origin CLI flag. 
- let cors = CorsLayer::new() - .allow_origin(AllowOrigin::list([ - format!("http://127.0.0.1:{}", config.port) - .parse() - .expect("valid localhost origin"), - format!("http://localhost:{}", config.port) - .parse() - .expect("valid localhost origin"), - format!("http://[::1]:{}", config.port) - .parse() - .expect("valid localhost origin"), - ])) - .allow_methods(tower_http::cors::Any) - .allow_headers(tower_http::cors::Any); - - let app = Router::new() - .merge(public_routes) - .merge(protected_routes) - .layer(cors) - .with_state(state); - - let addr = format!("{}:{}", config.bind, config.port); - let listener = tokio::net::TcpListener::bind(&addr).await?; - eprintln!("edda HTTP server listening on http://{addr}"); - axum::serve( - listener, - app.into_make_service_with_connect_info::(), - ) - .await?; - Ok(()) -} - -/// Build the router (for testing without binding to a port). -/// Note: no auth middleware is applied here — tests run as localhost. -#[cfg(test)] -fn router(repo_root: &Path) -> Router { - let store_root = edda_store::store_root(); - let chronicle = if store_root.exists() { - Some(ChronicleContext { - _store_root: store_root, - }) - } else { - None - }; - - let state = Arc::new(AppState { - repo_root: repo_root.to_path_buf(), - chronicle, - pending_pairings: Mutex::new(HashMap::new()), - }); - Router::new() - .route("/api/health", get(health)) - .route("/api/status", get(get_status)) - .route("/api/context", get(get_context)) - .route("/api/decisions", get(get_decisions)) - .route("/api/decisions/batch", post(post_decisions_batch)) - .route( - "/api/decisions/{event_id}/outcomes", - get(get_decision_outcomes), - ) - .route("/api/decisions/{event_id}/chain", get(get_decision_chain)) - .route("/api/log", get(get_log)) - .route("/api/drafts", get(get_drafts)) - .route("/api/drafts/{id}/approve", post(post_draft_approve)) - .route("/api/drafts/{id}/deny", post(post_draft_deny)) - .route("/api/note", post(post_note)) - .route("/api/decide", 
post(post_decide)) - .route("/api/events/karvi", post(post_karvi_event)) - .route("/api/telemetry", post(post_telemetry).get(get_telemetry)) - .route("/api/telemetry/stats", get(get_telemetry_stats)) - .route("/api/scope/check", post(post_scope_check)) - .route("/api/scope/whitelist", get(get_scope_whitelist)) - .route("/api/authz/check", post(post_authz_check)) - .route("/api/approval/check", post(post_approval_check)) - .route("/api/tool-tier/{tool_name}", get(get_tool_tier)) - .route("/api/recap", get(get_recap)) - .route("/api/recap/cached", get(get_recap_cached)) - .route("/api/overview", get(get_overview)) - .route("/api/projects", get(get_projects)) - .route("/api/actors", get(get_actors)) - .route("/api/actors/{name}", get(get_actor)) - .route("/api/briefs", get(get_briefs)) - .route("/api/briefs/{task_id}", get(get_brief)) - .route("/api/metrics/quality", get(get_quality_metrics)) - .route("/api/metrics/overview", get(get_metrics_overview)) - .route("/api/metrics/trends", get(get_metrics_trends)) - .route("/api/dashboard", get(get_dashboard)) - .route("/api/sync", post(post_sync)) - .route("/dashboard", get(serve_dashboard)) - .route("/api/events/stream", get(get_event_stream)) - .route("/api/controls/suggestions", get(get_controls_suggestions)) - .route("/api/controls/patches", get(get_controls_patches)) - .route( - "/api/controls/patches/{patch_id}/approve", - post(post_approve_controls_patch), - ) - .route("/api/snapshot", post(post_snapshot)) - .route("/api/snapshots", get(get_snapshots)) - .route("/api/snapshots/{context_hash}", get(get_snapshots_by_hash)) - .route("/api/villages/{village_id}/stats", get(get_village_stats)) - .route("/api/patterns", get(get_patterns)) - .route("/api/ingestion/evaluate", post(post_ingestion_evaluate)) - .route( - "/api/ingestion/records", - post(post_ingestion_record).get(get_ingestion_records), - ) - .route("/api/ingestion/suggestions", get(get_ingestion_suggestions)) - .route( - 
"/api/ingestion/suggestions/{id}/accept", - post(post_suggestion_accept), - ) - .route( - "/api/ingestion/suggestions/{id}/reject", - post(post_suggestion_reject), - ) - .route("/pair", get(complete_pairing)) - .route("/api/pair/new", post(create_pairing)) - .route("/api/pair/list", get(list_paired_devices)) - .route("/api/pair/revoke", post(revoke_device)) - .route("/api/pair/revoke-all", post(revoke_all_devices)) - .with_state(state) -} - -// ── Auth Middleware ── - -/// Check if a socket address is localhost. -fn is_localhost(addr: &SocketAddr) -> bool { - let ip = addr.ip(); - ip.is_loopback() - || match ip { - std::net::IpAddr::V6(v6) => { - // IPv4-mapped IPv6: ::ffff:127.0.0.1 - if let Some(v4) = v6.to_ipv4_mapped() { - v4.is_loopback() - } else { - false - } - } - _ => false, - } -} - -/// Generate a pairing token (random hex, shorter). -fn generate_pairing_token() -> String { - use rand::Rng; - let mut rng = rand::thread_rng(); - let mut bytes = [0u8; 16]; - rng.fill(&mut bytes); - hex::encode(bytes) -} - -/// Auth middleware: localhost passes through, remote needs Bearer token. 
-async fn auth_middleware( - ConnectInfo(addr): ConnectInfo, - State(state): State>, - req: Request, - next: Next, -) -> Result { - // Localhost: always allowed (backward compat) - if is_localhost(&addr) { - return Ok(next.run(req).await); - } - - // Remote: check Authorization header - let auth_header = req - .headers() - .get("authorization") - .and_then(|v| v.to_str().ok()); - - let raw_token = match auth_header { - Some(h) if h.starts_with("Bearer ") => &h[7..], - _ => { - return Err(AppError::Unauthorized( - "missing or invalid Authorization header".to_string(), - )); - } - }; - - let token_hash = hash_token(raw_token); - let ledger = state.open_ledger().context("auth_middleware")?; - let device = ledger.validate_device_token(&token_hash)?; - - match device { - Some(_) => Ok(next.run(req).await), - None => Err(AppError::Unauthorized( - "invalid or revoked device token".to_string(), - )), - } -} - -// ── Pairing Endpoints ── - -#[derive(Deserialize)] -struct CreatePairingRequest { - device_name: String, -} - -#[derive(Serialize)] -struct CreatePairingResponse { - pairing_url: String, - pairing_token: String, - expires_in_seconds: u64, -} - -/// POST /api/pair/new — Create a pairing request (generates one-time pairing token). 
-async fn create_pairing( - State(state): State>, - headers: HeaderMap, - body: Result, JsonRejection>, -) -> Result, AppError> { - let Json(req) = body.map_err(|e| AppError::Validation(e.to_string()))?; - - if req.device_name.is_empty() { - return Err(AppError::Validation("device_name is required".to_string())); - } - - let pairing_token = generate_pairing_token(); - let ttl = Duration::from_secs(600); // 10 minutes - - { - let mut pairings = state - .pending_pairings - .lock() - .map_err(|e| AppError::Internal(anyhow::anyhow!("lock poisoned: {e}")))?; - - // Clean up expired pairings - let now = std::time::Instant::now(); - pairings.retain(|_, v| v.expires_at > now); - - pairings.insert( - pairing_token.clone(), - PairingRequest { - device_name: req.device_name, - expires_at: now + ttl, - }, - ); - } - - // Determine host from request headers for URL construction - let host = headers - .get("host") - .and_then(|v| v.to_str().ok()) - .unwrap_or("localhost:7433"); - - let pairing_url = format!("http://{host}/pair?token={pairing_token}"); - - Ok(Json(CreatePairingResponse { - pairing_url, - pairing_token, - expires_in_seconds: 600, - })) -} - -#[derive(Deserialize)] -struct CompletePairingQuery { - token: String, -} - -#[derive(Serialize)] -struct CompletePairingResponse { - device_token: String, - device_name: String, -} - -/// GET /pair?token= — Complete pairing (the URL the device visits). 
-async fn complete_pairing( - State(state): State>, - headers: HeaderMap, - Query(query): Query, -) -> Result, AppError> { - // Extract and validate the pairing token - let pairing_req = { - let mut pairings = state - .pending_pairings - .lock() - .map_err(|e| AppError::Internal(anyhow::anyhow!("lock poisoned: {e}")))?; - - let now = std::time::Instant::now(); - pairings.retain(|_, v| v.expires_at > now); - - pairings.remove(&query.token) - }; - - let pairing_req = pairing_req - .ok_or_else(|| AppError::Validation("invalid or expired pairing token".to_string()))?; - - // Generate the long-lived device token - let device_token = generate_device_token(); - let token_hash = hash_token(&device_token); - - let now = time::OffsetDateTime::now_utc(); - let paired_at = now - .format(&time::format_description::well_known::Rfc3339) - .map_err(|e| AppError::Internal(anyhow::anyhow!("time format error: {e}")))?; - - let from_ip = headers - .get("x-forwarded-for") - .and_then(|v| v.to_str().ok()) - .unwrap_or("unknown") - .to_string(); - let event_id = format!("evt_{}", ulid::Ulid::new()); - - // Write device_pair event to ledger - let ledger = state.open_ledger().context("GET /pair")?; - let branch = ledger.head_branch()?; - - let payload = serde_json::json!({ - "device_name": pairing_req.device_name, - "paired_from_ip": from_ip, - "token_hash_prefix": &token_hash[..8], - }); - - let parent_hash = ledger.last_event_hash()?; - let mut event = edda_core::types::Event { - event_id: event_id.clone(), - ts: paired_at.clone(), - event_type: "device_pair".to_string(), - branch: branch.clone(), - parent_hash, - hash: String::new(), - payload, - refs: Default::default(), - schema_version: edda_core::types::SCHEMA_VERSION, - digests: vec![], - event_family: Some(edda_core::types::event_family::ADMIN.to_string()), - event_level: Some(edda_core::types::event_level::INFO.to_string()), - }; - - edda_core::event::finalize_event(&mut event)?; - ledger.append_event(&event)?; - - // Insert into 
device_tokens table - ledger.insert_device_token(&edda_ledger::DeviceTokenRow { - token_hash, - device_name: pairing_req.device_name.clone(), - paired_at, - paired_from_ip: from_ip, - revoked_at: None, - pair_event_id: event_id, - revoke_event_id: None, - })?; - - Ok(Json(CompletePairingResponse { - device_token, - device_name: pairing_req.device_name, - })) -} - -#[derive(Serialize)] -struct DeviceInfo { - device_name: String, - paired_at: String, - status: String, - revoked_at: Option, -} - -/// GET /api/pair/list — List all paired devices. -async fn list_paired_devices( - State(state): State>, -) -> Result>, AppError> { - let ledger = state.open_ledger().context("GET /api/pair/list")?; - let tokens = ledger.list_device_tokens()?; - - let devices: Vec = tokens - .into_iter() - .map(|t| DeviceInfo { - device_name: t.device_name, - paired_at: t.paired_at, - status: if t.revoked_at.is_some() { - "revoked".to_string() - } else { - "active".to_string() - }, - revoked_at: t.revoked_at, - }) - .collect(); - - Ok(Json(devices)) -} - -#[derive(Deserialize)] -struct RevokeDeviceRequest { - device_name: String, -} - -/// POST /api/pair/revoke — Revoke a specific device. 
-async fn revoke_device( - State(state): State>, - body: Result, JsonRejection>, -) -> Result, AppError> { - let Json(req) = body.map_err(|e| AppError::Validation(e.to_string()))?; - - let ledger = state.open_ledger().context("POST /api/pair/revoke")?; - - // Check the token exists *before* writing the ledger event - let existing = ledger.list_device_tokens()?; - let has_active = existing - .iter() - .any(|t| t.device_name == req.device_name && t.revoked_at.is_none()); - if !has_active { - return Err(AppError::NotFound(format!( - "no active device token found for '{}'", - req.device_name - ))); - } - - let event_id = format!("evt_{}", ulid::Ulid::new()); - let branch = ledger.head_branch()?; - - let now = time::OffsetDateTime::now_utc(); - let ts = now - .format(&time::format_description::well_known::Rfc3339) - .map_err(|e| AppError::Internal(anyhow::anyhow!("time format error: {e}")))?; - - let payload = serde_json::json!({ - "device_name": req.device_name, - }); - - let parent_hash = ledger.last_event_hash()?; - let mut event = edda_core::types::Event { - event_id: event_id.clone(), - ts, - event_type: "device_revoke".to_string(), - branch: branch.clone(), - parent_hash, - hash: String::new(), - payload, - refs: Default::default(), - schema_version: edda_core::types::SCHEMA_VERSION, - digests: vec![], - event_family: Some(edda_core::types::event_family::ADMIN.to_string()), - event_level: Some(edda_core::types::event_level::INFO.to_string()), - }; - - edda_core::event::finalize_event(&mut event)?; - ledger.append_event(&event)?; - ledger.revoke_device_token(&req.device_name, &event_id)?; - - Ok(Json(serde_json::json!({ - "ok": true, - "device_name": req.device_name, - "event_id": event_id, - }))) -} - -/// POST /api/pair/revoke-all — Revoke all active device tokens. 
-async fn revoke_all_devices( - State(state): State>, -) -> Result, AppError> { - let event_id = format!("evt_{}", ulid::Ulid::new()); - let ledger = state.open_ledger().context("POST /api/pair/revoke-all")?; - let branch = ledger.head_branch()?; - - let now = time::OffsetDateTime::now_utc(); - let ts = now - .format(&time::format_description::well_known::Rfc3339) - .map_err(|e| AppError::Internal(anyhow::anyhow!("time format error: {e}")))?; - - let payload = serde_json::json!({ "revoke_all": true }); - - let parent_hash = ledger.last_event_hash()?; - let mut event = edda_core::types::Event { - event_id: event_id.clone(), - ts, - event_type: "device_revoke".to_string(), - branch: branch.clone(), - parent_hash, - hash: String::new(), - payload, - refs: Default::default(), - schema_version: edda_core::types::SCHEMA_VERSION, - digests: vec![], - event_family: Some(edda_core::types::event_family::ADMIN.to_string()), - event_level: Some(edda_core::types::event_level::INFO.to_string()), - }; - - edda_core::event::finalize_event(&mut event)?; - ledger.append_event(&event)?; - - let count = ledger.revoke_all_device_tokens(&event_id)?; - - Ok(Json(serde_json::json!({ - "ok": true, - "revoked_count": count, - "event_id": event_id, - }))) -} - -// ── Health ── - -async fn health() -> Json { - Json(serde_json::json!({ "ok": true })) -} - -// ── GET /api/status ── - -#[derive(Serialize)] -struct StatusResponse { - branch: String, - last_commit: Option, - uncommitted_events: usize, -} - -#[derive(Serialize)] -struct LastCommit { - ts: String, - event_id: String, - title: String, -} - -async fn get_status(State(state): State>) -> Result, AppError> { - let ledger = state.open_ledger().context("GET /api/status")?; - let head = ledger.head_branch()?; - let snap = rebuild_branch(&ledger, &head)?; - - let last_commit = snap.last_commit.as_ref().map(|c| LastCommit { - ts: c.ts.clone(), - event_id: c.event_id.clone(), - title: c.title.clone(), - }); - - Ok(Json(StatusResponse { - 
branch: head, - last_commit, - uncommitted_events: snap.uncommitted_events, - })) -} - -// ── GET /api/context ── - -#[derive(Deserialize)] -struct ContextQuery { - depth: Option, -} - -#[derive(Serialize)] -struct ContextResponse { - context: String, -} - -async fn get_context( - State(state): State>, - Query(params): Query, -) -> Result, AppError> { - let ledger = state.open_ledger().context("GET /api/context")?; - let head = ledger.head_branch()?; - let depth = params.depth.unwrap_or(5); - let text = render_context(&ledger, &head, DeriveOptions { depth })?; - Ok(Json(ContextResponse { context: text })) -} - -// ── GET /api/decisions ── - -#[derive(Deserialize)] -struct DecisionsQuery { - q: Option, - context_summary: Option, - limit: Option, - all: Option, - branch: Option, - /// ISO 8601 lower bound (inclusive) for temporal filtering. - after: Option, - /// ISO 8601 upper bound (inclusive) for temporal filtering. - before: Option, - /// Comma-separated tags to filter by (OR semantics). - tags: Option, - /// Filter decisions belonging to a specific village. - village_id: Option, -} - -/// Validate that a string looks like a valid ISO 8601 / RFC 3339 timestamp. 
-fn validate_iso8601(s: &str) -> Result<(), String> { - time::OffsetDateTime::parse(s, &time::format_description::well_known::Rfc3339) - .map(|_| ()) - .map_err(|_| format!("invalid ISO 8601 timestamp: {s}")) -} - -async fn get_decisions( - State(state): State>, - Query(params): Query, -) -> Result, AppError> { - if let Some(ref after) = params.after { - validate_iso8601(after).map_err(AppError::Validation)?; - } - if let Some(ref before) = params.before { - validate_iso8601(before).map_err(AppError::Validation)?; - } - - let ledger = state.open_ledger().context("GET /api/decisions")?; - let q = params - .q - .as_deref() - .or(params.context_summary.as_deref()) - .unwrap_or(""); - let tags: Vec = params - .tags - .as_deref() - .filter(|s| !s.is_empty()) - .map(|s| s.split(',').map(|t| t.trim().to_string()).collect()) - .unwrap_or_default(); - let opts = edda_ask::AskOptions { - limit: params.limit.unwrap_or(20), - include_superseded: params.all.unwrap_or(false), - branch: params.branch, - impact: false, - after: params.after, - before: params.before, - tags, - village_id: params.village_id, - }; - let result = edda_ask::ask(&ledger, q, &opts, None)?; - Ok(Json(result)) -} - -// ── POST /api/decisions/batch ── - -#[derive(Deserialize)] -struct BatchQuery { - queries: Vec, - #[serde(default)] - slim: bool, -} - -#[derive(Deserialize)] -struct BatchSubQuery { - #[serde(default)] - q: Option, - #[serde(default)] - context_summary: Option, - #[serde(default)] - domain: Option, - #[serde(default)] - limit: Option, - #[serde(default)] - branch: Option, - #[serde(default)] - all: Option, -} - -#[derive(Serialize)] -struct BatchResponse { - results: Vec, -} - -#[derive(Serialize)] -struct BatchSubResult { - query_index: usize, - #[serde(skip_serializing_if = "Option::is_none")] - decisions: Option>, - #[serde(skip_serializing_if = "Option::is_none")] - timeline: Option>, - #[serde(skip_serializing_if = "Option::is_none")] - related_commits: Option>, - 
#[serde(skip_serializing_if = "Option::is_none")] - related_notes: Option>, - #[serde(skip_serializing_if = "Option::is_none")] - conversations: Option>, - #[serde(skip_serializing_if = "Option::is_none")] - error: Option, -} - -async fn post_decisions_batch( - State(state): State>, - body: Result, JsonRejection>, -) -> Result, AppError> { - let Json(body) = body.map_err(|e| AppError::Validation(e.body_text()))?; - - if body.queries.is_empty() || body.queries.len() > 10 { - return Err(AppError::Validation( - "queries must contain 1\u{2013}10 items".into(), - )); - } - - let ledger = state.open_ledger().context("POST /api/decisions/batch")?; - let mut results = Vec::with_capacity(body.queries.len()); - - for (i, sub) in body.queries.iter().enumerate() { - let q = sub - .q - .as_deref() - .or(sub.context_summary.as_deref()) - .or(sub.domain.as_deref()) - .unwrap_or(""); - - let opts = edda_ask::AskOptions { - limit: sub.limit.unwrap_or(20).min(100), - include_superseded: sub.all.unwrap_or(false), - branch: sub.branch.clone(), - impact: false, - after: None, - before: None, - tags: vec![], - village_id: None, - }; - - match edda_ask::ask(&ledger, q, &opts, None) { - Ok(result) => { - if body.slim { - results.push(BatchSubResult { - query_index: i, - decisions: Some(result.decisions), - timeline: None, - related_commits: None, - related_notes: None, - conversations: None, - error: None, - }); - } else { - results.push(BatchSubResult { - query_index: i, - decisions: Some(result.decisions), - timeline: Some(result.timeline), - related_commits: Some(result.related_commits), - related_notes: Some(result.related_notes), - conversations: Some(result.conversations), - error: None, - }); - } - } - Err(e) => { - results.push(BatchSubResult { - query_index: i, - decisions: None, - timeline: None, - related_commits: None, - related_notes: None, - conversations: None, - error: Some(e.to_string()), - }); - } - } - } - - Ok(Json(BatchResponse { results })) -} - -// ── GET 
/api/decisions/:event_id/outcomes ── - -async fn get_decision_outcomes( - State(state): State>, - AxumPath(event_id): AxumPath, -) -> Result { - let ledger = state - .open_ledger() - .context("GET /api/decisions/:id/outcomes")?; - let outcomes = ledger.decision_outcomes(&event_id)?; - - match outcomes { - Some(metrics) => { - let json = serde_json::to_value(metrics)?; - Ok(Json(json).into_response()) - } - None => Err(AppError::NotFound(format!( - "decision not found: {}", - event_id - ))), - } -} - -// ── GET /api/decisions/:event_id/chain ── - -#[derive(Deserialize)] -struct ChainQuery { - depth: Option, -} - -#[derive(Serialize)] -struct ChainResponse { - root: ChainNodeResponse, - chain: Vec, - meta: ChainMeta, -} - -#[derive(Serialize)] -struct ChainNodeResponse { - event_id: String, - key: String, - value: String, - reason: String, - #[serde(skip_serializing_if = "Option::is_none")] - relation: Option, - #[serde(skip_serializing_if = "Option::is_none")] - depth: Option, - ts: String, - is_active: bool, -} - -#[derive(Serialize)] -struct ChainMeta { - max_depth: usize, - total_nodes: usize, -} - -async fn get_decision_chain( - State(state): State>, - AxumPath(event_id): AxumPath, - Query(params): Query, -) -> Result, AppError> { - let depth = params.depth.unwrap_or(3).min(10); - let ledger = state - .open_ledger() - .context("GET /api/decisions/:id/chain")?; - - let (root, chain) = ledger - .causal_chain(&event_id, depth)? 
- .ok_or_else(|| AppError::NotFound(format!("decision not found: {}", event_id)))?; - - let root_node = ChainNodeResponse { - event_id: root.event_id, - key: root.key, - value: root.value, - reason: root.reason, - relation: None, - depth: None, - ts: root.ts.unwrap_or_default(), - is_active: root.is_active, - }; - - let chain_nodes: Vec = chain - .into_iter() - .map(|entry| ChainNodeResponse { - event_id: entry.decision.event_id, - key: entry.decision.key, - value: entry.decision.value, - reason: entry.decision.reason, - relation: Some(entry.relation), - depth: Some(entry.depth), - ts: entry.decision.ts.unwrap_or_default(), - is_active: entry.decision.is_active, - }) - .collect(); - - let total_nodes = 1 + chain_nodes.len(); - Ok(Json(ChainResponse { - root: root_node, - chain: chain_nodes, - meta: ChainMeta { - max_depth: depth, - total_nodes, - }, - })) -} - -// ── GET /api/log ── - -#[derive(Deserialize)] -struct LogQuery { - r#type: Option, - keyword: Option, - after: Option, - before: Option, - limit: Option, -} - -#[derive(Serialize)] -struct LogEntry { - ts: String, - #[serde(rename = "type")] - event_type: String, - event_id: String, - branch: String, - #[serde(rename = "summary")] - detail: String, - tags: Vec, -} - -#[derive(Serialize)] -struct LogResponse { - events: Vec, -} - -async fn get_log( - State(state): State>, - Query(params): Query, -) -> Result, AppError> { - let ledger = state.open_ledger().context("GET /api/log")?; - let head = ledger.head_branch()?; - let limit = params.limit.unwrap_or(50); - - let events = ledger.iter_events_filtered( - &head, - params.r#type.as_deref(), - params.keyword.as_deref(), - params.after.as_deref(), - params.before.as_deref(), - limit, - )?; - - let results: Vec = events - .iter() - .map(|e| { - let detail = e - .payload - .get("text") - .and_then(|v| v.as_str()) - .or_else(|| e.payload.get("title").and_then(|v| v.as_str())) - .unwrap_or("") - .to_string(); - let tags: Vec = e - .payload - .get("tags") - 
.and_then(|v| v.as_array()) - .map(|arr| { - arr.iter() - .filter_map(|v| v.as_str().map(String::from)) - .collect() - }) - .unwrap_or_default(); - LogEntry { - ts: e.ts.clone(), - event_type: e.event_type.clone(), - event_id: e.event_id.clone(), - branch: e.branch.clone(), - detail, - tags, - } - }) - .collect(); - - Ok(Json(LogResponse { events: results })) -} - -// ── GET /api/drafts ── - -#[derive(Serialize)] -struct DraftItem { - draft_id: String, - title: String, - stage_id: String, - role: String, - approved: usize, - min_approvals: usize, - #[serde(skip_serializing_if = "Option::is_none")] - risk_level: Option, - #[serde(skip_serializing_if = "Option::is_none")] - phase: Option, - #[serde(skip_serializing_if = "Option::is_none")] - agent: Option, - #[serde(skip_serializing_if = "Option::is_none")] - issue: Option, - #[serde(skip_serializing_if = "Option::is_none")] - context_summary: Option, - #[serde(skip_serializing_if = "Option::is_none")] - requested_at: Option, - #[serde(default, skip_serializing_if = "Vec::is_empty")] - labels: Vec, -} - -#[derive(Serialize)] -struct DraftsResponse { - drafts: Vec, -} - -#[derive(Deserialize)] -struct MinimalDraft { - #[serde(default)] - draft_id: String, - #[serde(default)] - title: String, - #[serde(default)] - status: String, - #[serde(default)] - stages: Vec, - #[serde(default)] - labels: Vec, - #[serde(default)] - created_at: Option, - #[serde(default)] - branch: String, -} - -#[derive(Deserialize)] -struct MinimalStage { - #[serde(default)] - stage_id: String, - #[serde(default)] - role: String, - #[serde(default)] - min_approvals: usize, - #[serde(default)] - approved_by: Vec, - #[serde(default)] - status: String, -} - -async fn get_drafts(State(state): State>) -> Result, AppError> { - let ledger = state.open_ledger().context("GET /api/drafts")?; - let drafts_dir = &ledger.paths.drafts_dir; - - if !drafts_dir.exists() { - return Ok(Json(DraftsResponse { drafts: vec![] })); - } - - // Load agent phase states for 
context enrichment - let phase_states = load_agent_phase_states(&state.repo_root); - - // Load recent decisions/commits for context summary - let head = ledger.head_branch().unwrap_or_default(); - let recent_decisions = recent_decision_summaries(&ledger, &head, 3); - let recent_commits = recent_commit_summaries(&ledger, &head, 3); - - let mut items = Vec::new(); - for entry in std::fs::read_dir(drafts_dir)? { - let entry = entry?; - let path = entry.path(); - - if path.extension().and_then(|e| e.to_str()) != Some("json") { - continue; - } - if path.file_stem().and_then(|s| s.to_str()) == Some("latest") { - continue; - } - - let content = std::fs::read_to_string(&path)?; - let draft: MinimalDraft = match serde_json::from_str(&content) { - Ok(d) => d, - Err(_) => continue, - }; - - if draft.status == "applied" { - continue; - } - - // Try to find a matching agent phase state (by branch or label) - let matched_phase = phase_states.iter().find(|ps| { - ps.branch.as_deref() == Some(&draft.branch) || ps.session_id == draft.draft_id - }); - - let (phase, agent, issue, context_summary) = if let Some(ps) = matched_phase { - let summary = mobile_context_summary(ps, &recent_decisions, &recent_commits, 200); - ( - Some(ps.phase.to_string()), - Some(ps.session_id.clone()), - ps.issue, - Some(summary), - ) - } else { - (None, None, None, None) - }; - - // Derive risk_level from labels if present - let risk_level = draft - .labels - .iter() - .find(|l| l.starts_with("risk:") || l.contains("risk")) - .map(|l| l.strip_prefix("risk:").unwrap_or(l).to_string()) - .or_else(|| { - if draft.labels.iter().any(|l| l == "high-risk") { - Some("high".to_string()) - } else { - None - } - }); - - for stage in &draft.stages { - if stage.status != "pending" { - continue; - } - items.push(DraftItem { - draft_id: draft.draft_id.clone(), - title: draft.title.clone(), - stage_id: stage.stage_id.clone(), - role: stage.role.clone(), - approved: stage.approved_by.len(), - min_approvals: 
stage.min_approvals, - risk_level: risk_level.clone(), - phase: phase.clone(), - agent: agent.clone(), - issue, - context_summary: context_summary.clone(), - requested_at: draft.created_at.clone(), - labels: draft.labels.clone(), - }); - } - } - - Ok(Json(DraftsResponse { drafts: items })) -} - -/// Load agent phase state files from `.edda/agent-phases/`. -fn load_agent_phase_states(repo_root: &Path) -> Vec { - let phases_dir = repo_root.join(".edda").join("agent-phases"); - if !phases_dir.exists() { - return Vec::new(); - } - let entries = match std::fs::read_dir(&phases_dir) { - Ok(e) => e, - Err(_) => return Vec::new(), - }; - let mut states = Vec::new(); - for entry in entries.flatten() { - let path = entry.path(); - if path.extension().and_then(|e| e.to_str()) != Some("json") { - continue; - } - if let Ok(content) = std::fs::read_to_string(&path) { - if let Ok(ps) = serde_json::from_str::(&content) { - states.push(ps); - } - } - } - states -} - -/// Fetch recent decision summaries from the ledger for context generation. -fn recent_decision_summaries(ledger: &Ledger, branch: &str, limit: usize) -> Vec { - let events = ledger - .iter_events_filtered(branch, Some("decision"), None, None, None, limit) - .unwrap_or_default(); - events - .iter() - .filter_map(|e| { - let key = e.payload.get("key")?.as_str()?; - let value = e.payload.get("value")?.as_str()?; - Some(format!("{key}={value}")) - }) - .collect() -} - -/// Fetch recent commit summaries from the ledger for context generation. 
-fn recent_commit_summaries(ledger: &Ledger, branch: &str, limit: usize) -> Vec { - let events = ledger - .iter_events_filtered(branch, Some("commit"), None, None, None, limit) - .unwrap_or_default(); - events - .iter() - .filter_map(|e| { - e.payload - .get("title") - .and_then(|v| v.as_str()) - .map(|s| s.to_string()) - }) - .collect() -} - -// ── POST /api/drafts/:id/approve ── - -#[derive(Deserialize)] -struct ApproveRequest { - #[serde(default)] - reason: Option, - #[serde(default)] - actor: Option, - #[serde(default)] - stage: Option, -} - -#[derive(Serialize)] -struct ApprovalResponse { - event_id: String, - draft_status: String, - stage_status: String, -} - -async fn post_draft_approve( - State(state): State>, - headers: HeaderMap, - AxumPath(draft_id): AxumPath, - body: Result, JsonRejection>, -) -> Result { - let Json(body) = body.map_err(|e| AppError::Validation(e.body_text()))?; - handle_draft_action(&state, &headers, &draft_id, "approve", &body).await -} - -// ── POST /api/drafts/:id/deny ── - -async fn post_draft_deny( - State(state): State>, - headers: HeaderMap, - AxumPath(draft_id): AxumPath, - body: Result, JsonRejection>, -) -> Result { - let Json(body) = body.map_err(|e| AppError::Validation(e.body_text()))?; - handle_draft_action(&state, &headers, &draft_id, "deny", &body).await -} - -/// Shared handler for approve/deny actions on drafts. 
-async fn handle_draft_action( - state: &AppState, - headers: &HeaderMap, - draft_id: &str, - action: &str, - body: &ApproveRequest, -) -> Result { - let ledger = state.open_ledger().context("POST /api/drafts/:id/action")?; - let _lock = WorkspaceLock::acquire(&ledger.paths)?; - - // Read the draft - let draft_path = ledger.paths.drafts_dir.join(format!("{draft_id}.json")); - if !draft_path.exists() { - return Err(AppError::NotFound(format!("draft not found: {draft_id}"))); - } - let content = std::fs::read_to_string(&draft_path)?; - let mut draft: serde_json::Value = serde_json::from_str(&content)?; - - // Check draft status - let draft_status = draft - .get("status") - .and_then(|v| v.as_str()) - .unwrap_or("proposed"); - if draft_status == "applied" || draft_status == "rejected" { - return Err(AppError::Conflict(format!( - "draft {draft_id} is already {draft_status}" - ))); - } - - let actor = body.actor.as_deref().unwrap_or("human"); - let reason = body.reason.as_deref().unwrap_or(""); - let device_id = headers - .get("x-edda-device-id") - .and_then(|v| v.to_str().ok()); - - let decision = if action == "approve" { - "approve" - } else { - "reject" - }; - - let head = ledger.head_branch()?; - - // Compute draft SHA256 - let draft_sha256 = { - use sha2::Digest; - let bytes = std::fs::read(&draft_path)?; - let mut hasher = sha2::Sha256::new(); - hasher.update(&bytes); - hex::encode(hasher.finalize()) - }; - - let parent_hash = ledger.last_event_hash()?; - - // Handle stage-aware drafts - let stages = draft - .get("stages") - .and_then(|v| v.as_array()) - .cloned() - .unwrap_or_default(); - - let (stage_id, stage_role, stage_status) = if !stages.is_empty() { - // Determine which stage to act on - let requested_stage = body.stage.as_deref(); - let target_stage = if let Some(sid) = requested_stage { - stages - .iter() - .find(|s| s.get("stage_id").and_then(|v| v.as_str()) == Some(sid)) - .ok_or_else(|| AppError::NotFound(format!("stage not found: {sid}")))? 
- } else { - // Auto-select the first pending stage - stages - .iter() - .find(|s| s.get("status").and_then(|v| v.as_str()) == Some("pending")) - .ok_or_else(|| AppError::Conflict("no pending stages remain".to_string()))? - }; - - let sid = target_stage - .get("stage_id") - .and_then(|v| v.as_str()) - .unwrap_or("") - .to_string(); - let role = target_stage - .get("role") - .and_then(|v| v.as_str()) - .unwrap_or("") - .to_string(); - let st_status = target_stage - .get("status") - .and_then(|v| v.as_str()) - .unwrap_or("pending") - .to_string(); - - if st_status != "pending" { - return Err(AppError::Conflict(format!( - "stage '{sid}' is already {st_status}" - ))); - } - - (sid, role, st_status) - } else { - (String::new(), String::new(), "pending".to_string()) - }; - - // Replay protection: stage already acted on - if stage_status != "pending" { - return Err(AppError::Conflict(format!( - "draft {draft_id} stage '{}' is already {stage_status}", - stage_id - ))); - } - - // Create approval event - let event = new_approval_event(&ApprovalEventParams { - branch: &head, - parent_hash: parent_hash.as_deref(), - draft_id, - draft_sha256: &draft_sha256, - decision, - actor, - note: reason, - stage_id: &stage_id, - role: &stage_role, - device_id, - })?; - ledger.append_event(&event)?; - - // Update draft JSON - let ts = event.ts.clone(); - let approval_record = serde_json::json!({ - "ts": ts, - "actor": actor, - "decision": decision, - "note": reason, - "approval_event_id": event.event_id, - "stage_id": stage_id, - "role": stage_role, - }); - - // Append to approvals array - if let Some(approvals) = draft.get_mut("approvals") { - if let Some(arr) = approvals.as_array_mut() { - arr.push(approval_record); - } - } else { - draft["approvals"] = serde_json::json!([approval_record]); - } - - // Update stage status - let mut new_stage_status = "pending".to_string(); - if let Some(stages_arr) = draft.get_mut("stages").and_then(|v| v.as_array_mut()) { - for stage in 
stages_arr.iter_mut() { - let sid = stage.get("stage_id").and_then(|v| v.as_str()).unwrap_or(""); - if sid == stage_id { - if decision == "reject" { - stage["status"] = serde_json::Value::String("rejected".to_string()); - new_stage_status = "rejected".to_string(); - } else { - // Read min_approvals first to avoid borrow conflict - let min = stage - .get("min_approvals") - .and_then(|v| v.as_u64()) - .unwrap_or(1) as usize; - // Add actor to approved_by - if let Some(ab) = stage.get_mut("approved_by") { - if let Some(arr) = ab.as_array_mut() { - let actor_val = serde_json::Value::String(actor.to_string()); - if !arr.contains(&actor_val) { - arr.push(actor_val); - } - if arr.len() >= min { - new_stage_status = "approved".to_string(); - } - } - } - if new_stage_status == "approved" { - stage["status"] = serde_json::Value::String("approved".to_string()); - } - } - break; - } - } - - // Update draft-level status - let all_approved = stages_arr - .iter() - .all(|s| s.get("status").and_then(|v| v.as_str()) == Some("approved")); - let any_rejected = stages_arr - .iter() - .any(|s| s.get("status").and_then(|v| v.as_str()) == Some("rejected")); - - if any_rejected { - draft["status"] = serde_json::Value::String("rejected".to_string()); - } else if all_approved { - draft["status"] = serde_json::Value::String("approved".to_string()); - } - } else { - // Flat (no stages) draft - if decision == "reject" { - draft["status"] = serde_json::Value::String("rejected".to_string()); - new_stage_status = "rejected".to_string(); - } else { - let min = draft - .get("policy_min_approvals") - .and_then(|v| v.as_u64()) - .unwrap_or(1) as usize; - let count = draft - .get("approvals") - .and_then(|v| v.as_array()) - .map(|a| { - a.iter() - .filter(|r| r.get("decision").and_then(|v| v.as_str()) == Some("approve")) - .count() - }) - .unwrap_or(0); - if count >= min.max(1) { - draft["status"] = serde_json::Value::String("approved".to_string()); - new_stage_status = "approved".to_string(); - } - } 
- } - - let final_draft_status = draft - .get("status") - .and_then(|v| v.as_str()) - .unwrap_or("proposed") - .to_string(); - - // Write updated draft - std::fs::write(&draft_path, serde_json::to_string_pretty(&draft)?)?; - - // Rebuild derived state - let snap_branch = ledger.head_branch()?; - let _ = edda_derive::rebuild_branch(&ledger, &snap_branch); - - let resp = ApprovalResponse { - event_id: event.event_id, - draft_status: final_draft_status, - stage_status: new_stage_status, - }; - - Ok((StatusCode::OK, Json(resp)).into_response()) -} - -// ── POST /api/note ── - -#[derive(Deserialize)] -struct NoteBody { - text: String, - role: Option, - tags: Option>, -} - -#[derive(Serialize)] -struct EventResponse { - event_id: String, -} - -async fn post_note( - State(state): State>, - body: Result, JsonRejection>, -) -> Result { - let Json(body) = body.map_err(|e| AppError::Validation(e.body_text()))?; - - let ledger = state.open_ledger().context("POST /api/note")?; - let _lock = WorkspaceLock::acquire(&ledger.paths)?; - - let branch = ledger.head_branch()?; - let parent_hash = ledger.last_event_hash()?; - let role = body.role.as_deref().unwrap_or("user"); - let tags = body.tags.unwrap_or_default(); - - let event = new_note_event(&branch, parent_hash.as_deref(), role, &body.text, &tags)?; - ledger.append_event(&event)?; - - Ok(( - StatusCode::CREATED, - Json(EventResponse { - event_id: event.event_id, - }), - )) -} - -// ── POST /api/decide ── - -#[derive(Deserialize)] -struct DecideBody { - decision: String, - reason: Option, -} - -#[derive(Serialize)] -struct DecideResponse { - event_id: String, - #[serde(skip_serializing_if = "Option::is_none")] - superseded: Option, -} - -async fn post_decide( - State(state): State>, - body: Result, JsonRejection>, -) -> Result { - let Json(body) = body.map_err(|e| AppError::Validation(e.body_text()))?; - - let (key, value) = body.decision.split_once('=').ok_or_else(|| { - AppError::Validation( - "decision must be in key=value 
format (e.g. \"db.engine=postgres\")".into(), - ) - })?; - let key = key.trim(); - let value = value.trim(); - - let ledger = state.open_ledger().context("POST /api/decide")?; - let _lock = WorkspaceLock::acquire(&ledger.paths)?; - - let branch = ledger.head_branch()?; - let parent_hash = ledger.last_event_hash()?; - - let dp = DecisionPayload { - key: key.to_string(), - value: value.to_string(), - reason: body.reason, - scope: None, - authority: None, - affected_paths: None, - tags: None, - review_after: None, - reversibility: None, - village_id: None, - }; - let mut event = new_decision_event(&branch, parent_hash.as_deref(), "system", &dp)?; - - // Auto-supersede: find prior decision with same key via SQL index - let prior = ledger.find_active_decision(&branch, key)?; - let mut superseded = None; - if let Some(ref row) = prior { - if row.value != value { - superseded = Some(row.event_id.clone()); - event.refs.provenance.push(Provenance { - target: row.event_id.clone(), - rel: rel::SUPERSEDES.to_string(), - note: Some(format!("key '{}' re-decided", key)), - }); - } - } - - finalize_event(&mut event)?; - ledger.append_event(&event)?; - - Ok(( - StatusCode::CREATED, - Json(DecideResponse { - event_id: event.event_id, - superseded, - }), - )) -} - -// ── POST /api/events/karvi ── - -#[derive(Deserialize)] -struct KarviEventBody { - version: String, - event_id: String, - event_type: String, - occurred_at: String, - #[serde(default)] - trace_id: Option, - #[serde(default)] - task_id: Option, - #[serde(default)] - step_id: Option, - #[serde(default)] - project: Option, - #[serde(default)] - runtime: Option, - #[serde(default)] - model: Option, - #[serde(default)] - actor: Option, - #[serde(default)] - usage: Option, - #[serde(default)] - result: Option, - #[serde(default)] - decision_ref: Option, -} - -#[derive(Serialize)] -struct KarviEventResponse { - event_id: String, - status: String, -} - -const VALID_KARVI_EVENT_TYPES: &[&str] = &["step_completed", "step_failed", 
"step_cancelled"]; - -async fn post_karvi_event( - State(state): State>, - Json(body): Json, -) -> Result { - // Validate version - if body.version != "karvi.event.v1" { - let err = serde_json::json!({ - "error": format!("unsupported version: {}", body.version), - }); - return Ok((StatusCode::BAD_REQUEST, Json(err)).into_response()); - } - - // Validate event_type - if !VALID_KARVI_EVENT_TYPES.contains(&body.event_type.as_str()) { - let err = serde_json::json!({ - "error": format!("unsupported event_type: {}", body.event_type), - }); - return Ok((StatusCode::BAD_REQUEST, Json(err)).into_response()); - } - - // Serialize full body as payload - let payload = serde_json::json!({ - "version": body.version, - "event_id": body.event_id, - "event_type": body.event_type, - "occurred_at": body.occurred_at, - "trace_id": body.trace_id, - "task_id": body.task_id, - "step_id": body.step_id, - "project": body.project, - "runtime": body.runtime, - "model": body.model, - "actor": body.actor, - "usage": body.usage, - "result": body.result, - "decision_ref": body.decision_ref, - }); - - let ledger = state.open_ledger().context("POST /api/events/karvi")?; - let _lock = WorkspaceLock::acquire(&ledger.paths)?; - let branch = ledger.head_branch()?; - let parent_hash = ledger.last_event_hash()?; - - let event = new_execution_event( - &branch, - parent_hash.as_deref(), - &body.event_id, - &body.occurred_at, - payload, - body.decision_ref.as_deref(), - )?; - - let inserted = ledger.append_event_idempotent(&event)?; - - let response = KarviEventResponse { - event_id: event.event_id, - status: if inserted { - "created".to_string() - } else { - "duplicate".to_string() - }, - }; - - let status = if inserted { - StatusCode::CREATED - } else { - StatusCode::OK - }; - - Ok((status, Json(response)).into_response()) -} - -// ── POST /api/telemetry ── - -#[derive(Deserialize)] -struct TelemetryBody { - cycle_id: String, - source: String, - started_at: String, - total_duration_ms: u64, - 
#[serde(default)] - operations: Vec, - #[serde(default)] - cost: Option, - #[serde(default)] - tags: Vec, - #[serde(default)] - metadata: Option, -} - -#[derive(Deserialize, Serialize)] -struct TelemetryOp { - name: String, - duration_ms: u64, - #[serde(default)] - token_usage: Option, - #[serde(default)] - status: Option, -} - -#[derive(Deserialize, Serialize)] -struct TelemetryTokenUsage { - input_tokens: u64, - output_tokens: u64, -} - -#[derive(Deserialize, Serialize)] -struct TelemetryCost { - total_usd: f64, - #[serde(default, skip_serializing_if = "Option::is_none")] - breakdown: Option>, -} - -#[derive(Serialize)] -struct TelemetryResponse { - event_id: String, - status: String, -} - -async fn post_telemetry( - State(state): State>, - body: Result, JsonRejection>, -) -> Result { - let Json(body) = body.map_err(|e| AppError::Validation(e.to_string()))?; - - // Serialize full body as payload - let payload = serde_json::json!({ - "cycle_id": body.cycle_id, - "source": body.source, - "started_at": body.started_at, - "total_duration_ms": body.total_duration_ms, - "operations": body.operations, - "cost": body.cost, - "tags": body.tags, - "metadata": body.metadata, - }); - - let ledger = state.open_ledger().context("POST /api/telemetry")?; - let _lock = WorkspaceLock::acquire(&ledger.paths)?; - let branch = ledger.head_branch()?; - let parent_hash = ledger.last_event_hash()?; - - let event = new_telemetry_event( - &branch, - parent_hash.as_deref(), - &body.cycle_id, - &body.started_at, - payload, - )?; - - let inserted = ledger.append_event_idempotent(&event)?; - - let response = TelemetryResponse { - event_id: event.event_id, - status: if inserted { - "created".to_string() - } else { - "duplicate".to_string() - }, - }; - - let status = if inserted { - StatusCode::CREATED - } else { - StatusCode::OK - }; - - Ok((status, Json(response)).into_response()) -} - -// ── GET /api/telemetry ── - -#[derive(Deserialize)] -struct TelemetryQuery { - #[serde(default)] - after: 
Option, - #[serde(default)] - before: Option, - #[serde(default)] - source: Option, - #[serde(default)] - limit: Option, -} - -async fn get_telemetry( - State(state): State>, - Query(q): Query, -) -> Result { - let ledger = state.open_ledger().context("GET /api/telemetry")?; - let branch = ledger.head_branch()?; - let limit = q.limit.unwrap_or(100); - - let events = ledger.iter_events_filtered( - &branch, - Some("cycle_telemetry"), - None, - q.after.as_deref(), - q.before.as_deref(), - limit, - )?; - - let mut payloads: Vec = events - .into_iter() - .map(|e| { - let mut p = e.payload; - // Inject event_id for cross-reference - if let Some(obj) = p.as_object_mut() { - obj.insert("event_id".to_string(), serde_json::json!(e.event_id)); - } - p - }) - .collect(); - - // Post-filter by source if specified - if let Some(ref source) = q.source { - payloads.retain(|p| { - p.get("source") - .and_then(|v| v.as_str()) - .is_some_and(|s| s == source) - }); - } - - Ok(Json(payloads).into_response()) -} - -// ── GET /api/telemetry/stats ── - -#[derive(Deserialize)] -struct TelemetryStatsQuery { - #[serde(default)] - days: Option, - #[serde(default)] - source: Option, -} - -async fn get_telemetry_stats( - State(state): State>, - Query(q): Query, -) -> Result { - let ledger = state.open_ledger().context("GET /api/telemetry/stats")?; - let branch = ledger.head_branch()?; - let days = q.days.unwrap_or(7); - - // Compute "after" date - let now = time::OffsetDateTime::now_utc(); - let after_date = now - time::Duration::days(i64::from(days)); - let after_str = after_date - .format(&time::format_description::well_known::Rfc3339) - .unwrap_or_default(); - - let events = ledger.iter_events_filtered( - &branch, - Some("cycle_telemetry"), - None, - Some(&after_str), - None, - 10_000, - )?; - - let mut payloads: Vec = events.into_iter().map(|e| e.payload).collect(); - - // Post-filter by source if specified - if let Some(ref source) = q.source { - payloads.retain(|p| { - p.get("source") - 
.and_then(|v| v.as_str()) - .is_some_and(|s| s == source) - }); - } - - let stats = compute_telemetry_stats(&payloads); - Ok(Json(stats).into_response()) -} - -/// Compute telemetry statistics from a set of cycle_telemetry payloads. -fn compute_telemetry_stats(payloads: &[serde_json::Value]) -> serde_json::Value { - let cycle_count = payloads.len(); - if cycle_count == 0 { - return serde_json::json!({ - "cycle_count": 0, - "avg_duration_ms": 0.0, - "p95_duration_ms": 0.0, - "total_cost_usd": 0.0, - "slowest_operations": [], - "error_rate": 0.0, - }); - } - - // Collect durations - let mut durations: Vec = payloads - .iter() - .filter_map(|p| p.get("total_duration_ms").and_then(|v| v.as_f64())) - .collect(); - durations.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal)); - - let avg_duration_ms = if durations.is_empty() { - 0.0 - } else { - durations.iter().sum::() / durations.len() as f64 - }; - - let p95_duration_ms = if durations.is_empty() { - 0.0 - } else { - let idx = ((durations.len() as f64) * 0.95).ceil() as usize; - durations[idx.min(durations.len() - 1)] - }; - - // Total cost - let total_cost_usd: f64 = payloads - .iter() - .filter_map(|p| { - p.get("cost") - .and_then(|c| c.get("total_usd")) - .and_then(|v| v.as_f64()) - }) - .sum(); - - // Per-operation stats - let mut op_stats: std::collections::HashMap = - std::collections::HashMap::new(); // (sum_dur, max_dur, count, error_count) - - let mut total_ops = 0usize; - let mut total_errors = 0usize; - - for payload in payloads { - if let Some(ops) = payload.get("operations").and_then(|v| v.as_array()) { - for op in ops { - let name = op.get("name").and_then(|v| v.as_str()).unwrap_or("unknown"); - let dur = op.get("duration_ms").and_then(|v| v.as_u64()).unwrap_or(0); - let status = op.get("status").and_then(|v| v.as_str()).unwrap_or("ok"); - - let entry = op_stats.entry(name.to_string()).or_insert((0.0, 0, 0, 0)); - entry.0 += dur as f64; - if dur > entry.1 { - entry.1 = dur; - } - 
entry.2 += 1; - total_ops += 1; - if status == "error" { - entry.3 += 1; - total_errors += 1; - } - } - } - } - - // Build slowest operations (top 5 by avg duration) - let mut op_list: Vec = op_stats - .iter() - .map(|(name, (sum, max, count, _))| { - serde_json::json!({ - "name": name, - "avg_duration_ms": sum / *count as f64, - "max_duration_ms": max, - "count": count, - }) - }) - .collect(); - op_list.sort_by(|a, b| { - let a_avg = a["avg_duration_ms"].as_f64().unwrap_or(0.0); - let b_avg = b["avg_duration_ms"].as_f64().unwrap_or(0.0); - b_avg - .partial_cmp(&a_avg) - .unwrap_or(std::cmp::Ordering::Equal) - }); - op_list.truncate(5); - - let error_rate = if total_ops > 0 { - total_errors as f64 / total_ops as f64 - } else { - 0.0 - }; - - serde_json::json!({ - "cycle_count": cycle_count, - "avg_duration_ms": avg_duration_ms, - "p95_duration_ms": p95_duration_ms, - "total_cost_usd": total_cost_usd, - "slowest_operations": op_list, - "error_rate": error_rate, - }) -} - -// ── POST /api/snapshot ── - -#[derive(Deserialize)] -struct SnapshotBody { - context: serde_json::Value, - result: serde_json::Value, - engine_version: String, - #[serde(default = "default_snapshot_schema")] - schema_version: String, - context_hash: String, - #[serde(default = "default_redaction_level")] - redaction_level: String, - village_id: Option, - cycle_id: Option, -} - -fn default_snapshot_schema() -> String { - "snapshot.v1".to_string() -} - -fn default_redaction_level() -> String { - "full".to_string() -} - -#[derive(Serialize)] -struct SnapshotResponse { - event_id: String, - context_hash: String, -} - -async fn post_snapshot( - State(state): State>, - body: Result, JsonRejection>, -) -> Result { - let Json(body) = body.map_err(|e| AppError::Validation(e.body_text()))?; - - if body.engine_version.is_empty() { - return Err(AppError::Validation( - "engine_version must not be empty".into(), - )); - } - if body.context_hash.is_empty() { - return Err(AppError::Validation( - "context_hash 
must not be empty".into(), - )); - } - - let ledger = state.open_ledger().context("POST /api/snapshot")?; - let _lock = WorkspaceLock::acquire(&ledger.paths)?; - - let branch = ledger.head_branch()?; - let parent_hash = ledger.last_event_hash()?; - - // Attempt blob offload for large payloads - let context_bytes = serde_json::to_vec(&body.context)?; - let result_bytes = serde_json::to_vec(&body.result)?; - - let threshold = edda_ledger::SNAPSHOT_BLOB_THRESHOLD; - let context_blob = edda_ledger::blob_put_if_large( - &ledger.paths, - &context_bytes, - edda_ledger::BlobClass::DecisionEvidence, - threshold, - ) - .map_err(|e| anyhow::anyhow!("writing context blob: {e}"))?; - let result_blob = edda_ledger::blob_put_if_large( - &ledger.paths, - &result_bytes, - edda_ledger::BlobClass::DecisionEvidence, - threshold, - ) - .map_err(|e| anyhow::anyhow!("writing result blob: {e}"))?; - - let has_blobs = context_blob.is_some() || result_blob.is_some(); - - // Build event payload: metadata + inline or blob refs - let mut payload = serde_json::json!({ - "engine_version": body.engine_version, - "schema_version": body.schema_version, - "context_hash": body.context_hash, - "redaction_level": body.redaction_level, - }); - if let Some(ref vid) = body.village_id { - payload["village_id"] = serde_json::Value::String(vid.clone()); - } - if let Some(ref cid) = body.cycle_id { - payload["cycle_id"] = serde_json::Value::String(cid.clone()); - } - - let mut blob_refs = Vec::new(); - if let Some(ref br) = context_blob { - payload["context_blob"] = serde_json::Value::String(br.clone()); - blob_refs.push(br.clone()); - } else { - payload["context_inline"] = body.context; - } - if let Some(ref br) = result_blob { - payload["result_blob"] = serde_json::Value::String(br.clone()); - blob_refs.push(br.clone()); - } else { - payload["result_inline"] = body.result; - } - - let event = new_snapshot_event(&branch, parent_hash.as_deref(), payload, blob_refs)?; - let event_id = event.event_id.clone(); - 
let created_at = event.ts.clone(); - - ledger.append_event(&event)?; - - // Insert into materialized view - ledger.insert_snapshot(&edda_ledger::DecideSnapshotRow { - event_id: event_id.clone(), - context_hash: body.context_hash.clone(), - engine_version: body.engine_version, - schema_version: body.schema_version, - redaction_level: body.redaction_level, - village_id: body.village_id, - cycle_id: body.cycle_id, - has_blobs, - created_at, - })?; - - Ok(( - StatusCode::CREATED, - Json(SnapshotResponse { - event_id, - context_hash: body.context_hash, - }), - )) -} - -// ── GET /api/snapshots ── - -#[derive(Deserialize)] -struct SnapshotsQuery { - village_id: Option, - engine_version: Option, - #[serde(default = "default_snapshot_limit")] - limit: usize, -} - -fn default_snapshot_limit() -> usize { - 20 -} - -async fn get_snapshots( - State(state): State>, - Query(query): Query, -) -> Result { - let ledger = state.open_ledger().context("GET /api/snapshots")?; - let rows = ledger.query_snapshots( - query.village_id.as_deref(), - query.engine_version.as_deref(), - query.limit, - )?; - - let mut snapshots = Vec::new(); - for row in &rows { - let snapshot = reconstruct_snapshot(&ledger, row)?; - snapshots.push(snapshot); - } - - Ok(Json(snapshots)) -} - -// ── GET /api/snapshots/:context_hash ── - -async fn get_snapshots_by_hash( - State(state): State>, - AxumPath(context_hash): AxumPath, -) -> Result { - let ledger = state.open_ledger().context("GET /api/snapshots/:hash")?; - let rows = ledger.snapshots_by_context_hash(&context_hash)?; - - if rows.is_empty() { - return Err(AppError::NotFound(format!( - "no snapshots found for context_hash: {context_hash}" - ))); - } - - let mut snapshots = Vec::new(); - for row in &rows { - let snapshot = reconstruct_snapshot(&ledger, row)?; - snapshots.push(snapshot); - } - - Ok(Json(snapshots)) -} - -// ── GET /api/villages/{village_id}/stats ── - -#[derive(Deserialize)] -struct VillageStatsQuery { - /// ISO 8601 lower bound 
(inclusive). - after: Option, - /// ISO 8601 upper bound (inclusive). - before: Option, -} - -async fn get_village_stats( - State(state): State>, - AxumPath(village_id): AxumPath, - Query(params): Query, -) -> Result, AppError> { - if let Some(ref after) = params.after { - validate_iso8601(after).map_err(AppError::Validation)?; - } - if let Some(ref before) = params.before { - validate_iso8601(before).map_err(AppError::Validation)?; - } - - let ledger = state.open_ledger().context("GET /api/villages/:id/stats")?; - let stats = ledger.village_stats( - &village_id, - params.after.as_deref(), - params.before.as_deref(), - )?; - Ok(Json(stats)) -} - -// ── GET /api/patterns ── - -#[derive(Deserialize)] -struct PatternsQuery { - village_id: Option, - /// Number of days to look back (default 7, max 90). - #[serde(default)] - lookback_days: Option, - /// Minimum occurrences to qualify as a pattern (default 3). - #[serde(default)] - min_occurrences: Option, -} - -async fn get_patterns( - State(state): State>, - Query(params): Query, -) -> Result, AppError> { - let village_id = params - .village_id - .as_deref() - .filter(|s| !s.is_empty()) - .ok_or_else(|| AppError::Validation("village_id query parameter is required".into()))?; - - let lookback_days = params.lookback_days.unwrap_or(7).min(90); - let min_occurrences = params.min_occurrences.unwrap_or(3).max(2); - - let now = time::OffsetDateTime::now_utc(); - let after_date = now - time::Duration::days(i64::from(lookback_days)); - let after_str = after_date - .format(&time::format_description::well_known::Rfc3339) - .unwrap_or_default(); - - let ledger = state.open_ledger().context("GET /api/patterns")?; - let patterns = ledger.detect_village_patterns(village_id, &after_str, min_occurrences)?; - let total = patterns.len(); - - Ok(Json(edda_ledger::sqlite_store::PatternDetectionResult { - village_id: village_id.to_string(), - lookback_days, - after: after_str, - total_patterns: total, - patterns, - })) -} - -/// Reconstruct 
a full snapshot JSON from a materialized view row + event payload. -fn reconstruct_snapshot( - ledger: &Ledger, - row: &edda_ledger::DecideSnapshotRow, -) -> Result { - let event = ledger - .get_event(&row.event_id)? - .ok_or_else(|| AppError::Internal(anyhow::anyhow!("event {} not found", row.event_id)))?; - - let payload = &event.payload; - - // Resolve context: inline or blob - let context = if let Some(inline) = payload.get("context_inline") { - inline.clone() - } else if let Some(blob_ref) = payload.get("context_blob").and_then(|v| v.as_str()) { - let path = - edda_ledger::blob_get_path(&ledger.paths, blob_ref).map_err(AppError::Internal)?; - let bytes = std::fs::read(&path) - .map_err(|e| AppError::Internal(anyhow::anyhow!("read context blob: {e}")))?; - serde_json::from_slice(&bytes)? - } else { - serde_json::Value::Null - }; - - // Resolve result: inline or blob - let result = if let Some(inline) = payload.get("result_inline") { - inline.clone() - } else if let Some(blob_ref) = payload.get("result_blob").and_then(|v| v.as_str()) { - let path = - edda_ledger::blob_get_path(&ledger.paths, blob_ref).map_err(AppError::Internal)?; - let bytes = std::fs::read(&path) - .map_err(|e| AppError::Internal(anyhow::anyhow!("read result blob: {e}")))?; - serde_json::from_slice(&bytes)? 
- } else { - serde_json::Value::Null - }; - - Ok(serde_json::json!({ - "event_id": row.event_id, - "context_hash": row.context_hash, - "engine_version": row.engine_version, - "schema_version": row.schema_version, - "redaction_level": row.redaction_level, - "village_id": row.village_id, - "cycle_id": row.cycle_id, - "context": context, - "result": result, - "created_at": row.created_at, - })) -} - -// ── GET /api/recap ── - -#[derive(Deserialize)] -struct RecapQuery { - project: Option, - query: Option, - #[serde(rename = "since")] - _since: Option, - week: Option, - scope: Option, -} - -#[derive(Serialize)] -struct RecapAnchor { - #[serde(rename = "type")] - anchor_type: String, - value: String, -} - -#[derive(Serialize)] -struct NeedsYouItem { - severity: String, - summary: String, - action: String, -} - -#[derive(Serialize)] -struct DecisionItem { - key: String, - value: String, - reason: String, -} - -#[derive(Serialize)] -struct RelatedItem { - summary: String, - relevance: String, -} - -#[derive(Serialize)] -struct RecapLayers { - net_result: String, - needs_you: Vec, - decisions: Vec, - related: Vec, -} - -#[derive(Serialize)] -struct RecapMeta { - sessions_analyzed: usize, - llm_used: bool, - cached: bool, - #[serde(skip_serializing_if = "Option::is_none")] - cost_usd: Option, - #[serde(skip_serializing_if = "Option::is_none")] - fallback: Option, -} - -#[derive(Serialize)] -struct RecapResponse { - anchor: RecapAnchor, - layers: RecapLayers, - meta: RecapMeta, -} - -async fn get_recap( - State(state): State>, - Query(params): Query, -) -> Result, AppError> { - if state.chronicle.is_none() { - return Err(anyhow::anyhow!("chronicle feature not enabled").into()); - } - - let anchor = if let Some(ref project) = params.project { - RecapAnchor { - anchor_type: "project".to_string(), - value: project.clone(), - } - } else if let Some(ref query) = params.query { - RecapAnchor { - anchor_type: "query".to_string(), - value: query.clone(), - } - } else if 
params.week.unwrap_or(false) { - RecapAnchor { - anchor_type: "time".to_string(), - value: "week".to_string(), - } - } else if params.scope.as_deref() == Some("all") { - RecapAnchor { - anchor_type: "scope".to_string(), - value: "all".to_string(), - } - } else { - RecapAnchor { - anchor_type: "default".to_string(), - value: "current".to_string(), - } - }; - - // TODO: Replace with actual edda-chronicle integration when #173 is complete - // For now, return a stub response - let response = RecapResponse { - anchor, - layers: RecapLayers { - net_result: "Recap engine not yet integrated (depends on #173)".to_string(), - needs_you: vec![], - decisions: vec![], - related: vec![], - }, - meta: RecapMeta { - sessions_analyzed: 0, - llm_used: false, - cached: false, - cost_usd: None, - fallback: Some("stub".to_string()), - }, - }; - - Ok(Json(response)) -} - -// ── GET /api/recap/cached ── - -#[derive(Deserialize)] -struct RecapCachedQuery { - project: Option, -} - -async fn get_recap_cached( - State(state): State>, - Query(params): Query, -) -> Result, AppError> { - if state.chronicle.is_none() { - return Err(anyhow::anyhow!("chronicle feature not enabled").into()); - } - - let anchor = if let Some(ref project) = params.project { - RecapAnchor { - anchor_type: "project".to_string(), - value: project.clone(), - } - } else { - RecapAnchor { - anchor_type: "default".to_string(), - value: "current".to_string(), - } - }; - - // TODO: Replace with actual cache lookup when #176 is complete - // For now, return a 404-like response - let response = RecapResponse { - anchor, - layers: RecapLayers { - net_result: "No cached recap available".to_string(), - needs_you: vec![], - decisions: vec![], - related: vec![], - }, - meta: RecapMeta { - sessions_analyzed: 0, - llm_used: false, - cached: true, - cost_usd: None, - fallback: Some("cache_miss".to_string()), - }, - }; - - Ok(Json(response)) -} - -// ── GET /api/overview ── - -#[derive(Serialize)] -struct OverviewRedItem { - project: 
String, - summary: String, - action: String, - blocked_count: usize, -} - -#[derive(Serialize)] -struct OverviewYellowItem { - project: String, - summary: String, - eta: String, -} - -#[derive(Serialize)] -struct OverviewGreenItem { - project: String, - summary: String, -} - -#[derive(Serialize)] -struct OverviewResponse { - red: Vec, - yellow: Vec, - green: Vec, - updated_at: String, -} - -async fn get_overview( - State(state): State>, -) -> Result, AppError> { - if state.chronicle.is_none() { - return Err(anyhow::anyhow!("chronicle feature not enabled").into()); - } - - let projects = list_projects(); - let range = DateRange { - after: Some({ - let now = time::OffsetDateTime::now_utc(); - let from = now - time::Duration::days(7); - from.format(&time::format_description::well_known::Rfc3339) - .unwrap_or_default()[..10] - .to_string() - }), - before: None, - }; - - // Compute decisions + risks for attention routing - let decisions = aggregate_decisions(&projects); - let now_iso = time::OffsetDateTime::now_utc() - .format(&time::format_description::well_known::Rfc3339) - .unwrap_or_default(); - let decision_inputs: Vec = decisions - .iter() - .map(|d| DecisionInput { - event_id: d.event_id.clone(), - key: d.key.clone(), - value: d.value.clone(), - project: d.project_name.clone(), - ts: d.ts.clone(), - }) - .collect(); - - // TODO: This event-loading block is duplicated in get_dashboard; extract into a shared helper in a follow-up. 
- let mut all_events = Vec::new(); - for entry in &projects { - let root = std::path::Path::new(&entry.path); - if let Ok(ledger) = Ledger::open(root) { - if let Ok(events) = ledger.iter_events() { - all_events.extend(events); - } - } - } - - let risks = compute_decision_risks( - &decision_inputs, - &all_events, - &now_iso, - &std::collections::HashSet::new(), - ); - - let response = compute_attention(&risks, &projects, &range, &[], 7); - Ok(Json(response)) -} - -// ── GET /api/projects ── - -#[derive(Serialize)] -struct ProjectStatus { - name: String, - id: String, - last_activity: String, - status: String, -} - -#[derive(Serialize)] -struct ProjectsResponse { - projects: Vec, -} - -async fn get_projects( - State(state): State>, -) -> Result, AppError> { - if state.chronicle.is_none() { - return Err(anyhow::anyhow!("chronicle feature not enabled").into()); - } - - let projects = list_projects(); - let project_statuses: Vec = projects - .into_iter() - .map(|p| ProjectStatus { - name: p.name, - id: p.project_id, - last_activity: p.last_seen, - status: "unknown".to_string(), // TODO: Calculate from overview - }) - .collect(); - - Ok(Json(ProjectsResponse { - projects: project_statuses, - })) -} - -// ── GET /api/actors ── - -#[derive(Serialize)] -struct ActorResponse { - name: String, - kind: ActorKind, - roles: Vec, - #[serde(skip_serializing_if = "Option::is_none")] - email: Option, - #[serde(skip_serializing_if = "Option::is_none")] - display_name: Option, - #[serde(skip_serializing_if = "Option::is_none")] - runtime: Option, -} - -#[derive(Serialize)] -struct ActorsListResponse { - actors: Vec, -} - -async fn get_actors( - State(state): State>, -) -> Result, AppError> { - let ledger = state.open_ledger().context("GET /api/actors")?; - let cfg = policy::load_actors_from_dir(&ledger.paths.edda_dir)?; - let actors = cfg - .actors - .into_iter() - .map(|(name, def)| ActorResponse { - name, - kind: def.kind, - roles: def.roles, - email: def.email, - display_name: 
def.display_name, - runtime: def.runtime, - }) - .collect(); - Ok(Json(ActorsListResponse { actors })) -} - -// ── GET /api/actors/:name ── - -async fn get_actor( - State(state): State>, - AxumPath(name): AxumPath, -) -> Result, AppError> { - let ledger = state.open_ledger().context("GET /api/actors/:name")?; - let cfg = policy::load_actors_from_dir(&ledger.paths.edda_dir)?; - match cfg.actors.get(&name) { - Some(def) => Ok(Json(ActorResponse { - name, - kind: def.kind.clone(), - roles: def.roles.clone(), - email: def.email.clone(), - display_name: def.display_name.clone(), - runtime: def.runtime.clone(), - })), - None => Err(AppError::NotFound(format!("Actor '{name}' not found"))), - } -} - -// ── GET /api/metrics/quality ── - -#[derive(Deserialize)] -struct QualityQuery { - after: Option, - before: Option, -} - -async fn get_quality_metrics( - State(state): State>, - Query(params): Query, -) -> Result, AppError> { - let range = DateRange { - after: params.after, - before: params.before, - }; - let ledger = state.open_ledger().context("GET /api/metrics/quality")?; - let events = ledger.iter_events_by_type("execution_event")?; - let report = model_quality_from_events(&events, &range); - Ok(Json(report)) -} - -// ── GET /api/controls/suggestions ── - -#[derive(Deserialize)] -struct ControlsSuggestionsQuery { - after: Option, - before: Option, - min_samples: Option, -} - -#[derive(Serialize)] -struct ControlsSuggestionsResponse { - suggestions: Vec, - quality: QualityReport, -} - -async fn get_controls_suggestions( - State(state): State>, - Query(params): Query, -) -> Result, AppError> { - let range = DateRange { - after: params.after, - before: params.before, - }; - let ledger = state - .open_ledger() - .context("GET /api/controls/suggestions")?; - let events = ledger.iter_events_by_type("execution_event")?; - let report = model_quality_from_events(&events, &range); - - let rules = edda_bridge_claude::controls_suggest::load_rules(); - let suggestions = 
evaluate_controls_rules(&rules, &report, params.min_samples); - - Ok(Json(ControlsSuggestionsResponse { - suggestions, - quality: report, - })) -} - -// ── GET /api/controls/patches ── - -#[derive(Deserialize)] -struct ControlsPatchesQuery { - status: Option, -} - -async fn get_controls_patches( - State(state): State>, - Query(params): Query, -) -> Result>, AppError> { - let project_id = edda_store::project_id(&state.repo_root); - - let status_filter = match params.status.as_deref() { - Some("pending") => Some(edda_bridge_claude::controls_suggest::PatchStatus::Pending), - Some("approved") => Some(edda_bridge_claude::controls_suggest::PatchStatus::Approved), - Some("dismissed") => Some(edda_bridge_claude::controls_suggest::PatchStatus::Dismissed), - Some("applied") => Some(edda_bridge_claude::controls_suggest::PatchStatus::Applied), - Some(s) => { - return Err(AppError::Validation(format!( - "Unknown status: {s} (expected: pending, approved, dismissed, applied)" - ))); - } - None => None, - }; - - let patches = - edda_bridge_claude::controls_suggest::list_patches(&project_id, status_filter.as_ref())?; - Ok(Json(patches)) -} - -// ── POST /api/controls/patches/{patch_id}/approve ── - -#[derive(Deserialize)] -struct ApprovePatchBody { - #[serde(default = "default_approve_actor")] - by: String, -} - -fn default_approve_actor() -> String { - "api".to_string() -} - -async fn post_approve_controls_patch( - State(state): State>, - AxumPath(patch_id): AxumPath, - body: Result, JsonRejection>, -) -> Result, AppError> { - let project_id = edda_store::project_id(&state.repo_root); - let by = match body { - Ok(Json(b)) => b.by, - Err(_) => "api".to_string(), - }; - - let patch = edda_bridge_claude::controls_suggest::approve_patch(&project_id, &patch_id, &by)?; - Ok(Json(patch)) -} - -// ── GET /api/metrics/overview ── - -fn default_overview_days() -> usize { - 30 -} - -#[derive(Deserialize)] -struct MetricsOverviewQuery { - #[serde(default = "default_overview_days")] - days: 
usize, - group: Option, -} - -#[derive(Serialize)] -struct MetricsOverviewResponse { - period: DashboardPeriod, - projects: Vec, - totals: MetricsTotals, -} - -#[derive(Serialize)] -struct MetricsTotals { - total_cost_usd: f64, - total_events: usize, - total_commits: usize, - total_steps: u64, - overall_success_rate: f64, -} - -async fn get_metrics_overview( - State(state): State>, - Query(params): Query, -) -> Result, AppError> { - if state.chronicle.is_none() { - return Err(anyhow::anyhow!("chronicle feature not enabled").into()); - } - - let all_projects = list_projects(); - let projects: Vec<_> = if let Some(ref group) = params.group { - all_projects - .into_iter() - .filter(|p| p.group.as_deref() == Some(group.as_str())) - .collect() - } else { - all_projects - }; - - let now = time::OffsetDateTime::now_utc(); - let from_date = now - time::Duration::days(params.days as i64); - let to_str = now - .format(&time::format_description::well_known::Rfc3339) - .unwrap_or_default(); - let from_str = from_date - .format(&time::format_description::well_known::Rfc3339) - .unwrap_or_default(); - - let range = DateRange { - after: Some(from_str[..10].to_string()), - before: None, - }; - - let metrics = per_project_metrics(&projects, &range, params.days); - - let total_cost: f64 = metrics.iter().map(|m| m.cost.total_usd).sum(); - let total_events: usize = metrics.iter().map(|m| m.activity.events).sum(); - let total_commits: usize = metrics.iter().map(|m| m.activity.commits).sum(); - let total_steps: u64 = metrics.iter().map(|m| m.quality.total_steps).sum(); - let total_success: u64 = metrics - .iter() - .map(|m| (m.quality.success_rate * m.quality.total_steps as f64) as u64) - .sum(); - - let period = DashboardPeriod { - from: from_str[..10].to_string(), - to: to_str[..10].to_string(), - days: params.days, - }; - - Ok(Json(MetricsOverviewResponse { - period, - projects: metrics, - totals: MetricsTotals { - total_cost_usd: total_cost, - total_events, - total_commits, - 
total_steps, - overall_success_rate: if total_steps > 0 { - total_success as f64 / total_steps as f64 - } else { - 0.0 - }, - }, - })) -} - -// ── GET /api/metrics/trends ── - -fn default_trend_granularity() -> String { - "daily".to_string() -} - -#[derive(Deserialize)] -struct TrendsQuery { - #[serde(default = "default_overview_days")] - days: usize, - #[serde(default = "default_trend_granularity")] - granularity: String, - group: Option, -} - -#[derive(Serialize)] -struct TrendsResponse { - granularity: String, - data: Vec, -} - -#[derive(Serialize)] -struct TrendPoint { - date: String, - events: usize, - commits: usize, - cost_usd: f64, - execution_count: u64, - success_count: u64, - success_rate: f64, -} - -async fn get_metrics_trends( - State(state): State>, - Query(params): Query, -) -> Result, AppError> { - if state.chronicle.is_none() { - return Err(anyhow::anyhow!("chronicle feature not enabled").into()); - } - - let all_projects = list_projects(); - let projects: Vec<_> = if let Some(ref group) = params.group { - all_projects - .into_iter() - .filter(|p| p.group.as_deref() == Some(group.as_str())) - .collect() - } else { - all_projects - }; - - let now = time::OffsetDateTime::now_utc(); - let from_date = now - time::Duration::days(params.days as i64); - let from_str = from_date - .format(&time::format_description::well_known::Rfc3339) - .unwrap_or_default(); - - let range = DateRange { - after: Some(from_str[..10].to_string()), - before: None, - }; - - let r = rollup::compute_rollup(&projects, &range, "edda"); - - let data: Vec = match params.granularity.as_str() { - "weekly" => r - .weekly - .iter() - .map(|w| TrendPoint { - date: w.week_start.clone(), - events: w.events, - commits: w.commits, - cost_usd: w.cost_usd, - execution_count: w.execution_count, - success_count: w.success_count, - success_rate: if w.execution_count > 0 { - w.success_count as f64 / w.execution_count as f64 - } else { - 0.0 - }, - }) - .collect(), - "monthly" => r - .monthly - 
.iter() - .map(|m| TrendPoint { - date: m.month.clone(), - events: m.events, - commits: m.commits, - cost_usd: m.cost_usd, - execution_count: m.execution_count, - success_count: m.success_count, - success_rate: if m.execution_count > 0 { - m.success_count as f64 / m.execution_count as f64 - } else { - 0.0 - }, - }) - .collect(), - _ => r - .daily - .iter() - .map(|d| TrendPoint { - date: d.date.clone(), - events: d.events, - commits: d.commits, - cost_usd: d.cost_usd, - execution_count: d.execution_count, - success_count: d.success_count, - success_rate: if d.execution_count > 0 { - d.success_count as f64 / d.execution_count as f64 - } else { - 0.0 - }, - }) - .collect(), - }; - - Ok(Json(TrendsResponse { - granularity: params.granularity, - data, - })) -} - -// ── POST /api/scope/check ── - -#[derive(Deserialize)] -struct ScopeCheckBody { - project_id: String, - session_id: String, - files: Vec, -} - -#[derive(Serialize)] -struct ScopeCheckResult { - path: String, - allowed: bool, -} - -#[derive(Serialize)] -struct ScopeCheckResponse { - session_id: String, - label: String, - scope: Vec, - no_claim: bool, - all_allowed: bool, - results: Vec, -} - -// SECURITY: `project_id` is caller-supplied and not validated against any -// ACL. Acceptable because edda is a single-user local tool; revisit if -// multi-tenant isolation is ever required. 
-async fn post_scope_check( - Json(body): Json, -) -> Result, AppError> { - let board = edda_bridge_claude::peers::compute_board_state(&body.project_id); - let claim = board - .claims - .iter() - .find(|c| c.session_id == body.session_id); - - match claim { - None => { - // Permissive default: no claim means all files allowed - let results = body - .files - .iter() - .map(|f| ScopeCheckResult { - path: f.clone(), - allowed: true, - }) - .collect(); - Ok(Json(ScopeCheckResponse { - session_id: body.session_id, - label: String::new(), - scope: vec![], - no_claim: true, - all_allowed: true, - results, - })) - } - Some(claim) => { - // Build glob set from claim patterns - let mut builder = globset::GlobSetBuilder::new(); - for pattern in &claim.paths { - if let Ok(glob) = globset::GlobBuilder::new(pattern) - .literal_separator(false) - .build() - { - builder.add(glob); - } - } - let glob_set = builder - .build() - .map_err(|e| anyhow::anyhow!("invalid glob patterns: {}", e))?; - - let results: Vec = body - .files - .iter() - .map(|f| ScopeCheckResult { - path: f.clone(), - allowed: glob_set.is_match(f), - }) - .collect(); - - let all_allowed = results.iter().all(|r| r.allowed); - - Ok(Json(ScopeCheckResponse { - session_id: body.session_id, - label: claim.label.clone(), - scope: claim.paths.clone(), - no_claim: false, - all_allowed, - results, - })) - } - } -} - -// ── GET /api/scope/whitelist ── - -#[derive(Deserialize)] -struct WhitelistQuery { - project_id: String, - #[serde(default)] - session_id: Option, -} - -#[derive(Serialize)] -struct WhitelistClaim { - session_id: String, - label: String, - patterns: Vec, - ts: String, -} - -#[derive(Serialize)] -struct WhitelistResponse { - claims: Vec, -} - -// SECURITY: `project_id` is caller-supplied and not validated against any -// ACL. Acceptable because edda is a single-user local tool; revisit if -// multi-tenant isolation is ever required. 
-async fn get_scope_whitelist( - Query(query): Query, -) -> Result, AppError> { - let board = edda_bridge_claude::peers::compute_board_state(&query.project_id); - - let claims: Vec = board - .claims - .iter() - .filter(|c| { - query - .session_id - .as_ref() - .is_none_or(|sid| &c.session_id == sid) - }) - .map(|c| WhitelistClaim { - session_id: c.session_id.clone(), - label: c.label.clone(), - patterns: c.paths.clone(), - ts: c.ts.clone(), - }) - .collect(); - - Ok(Json(WhitelistResponse { claims })) -} - -// ── POST /api/authz/check ── - -async fn post_authz_check( - State(state): State>, - Json(body): Json, -) -> Result, AppError> { - let edda_dir = state.repo_root.join(".edda"); - let pol = policy::load_policy_from_dir(&edda_dir)?; - let actors = policy::load_actors_from_dir(&edda_dir)?; - let result = policy::evaluate_authz(&body, &pol, &actors); - Ok(Json(result)) -} - -// ── GET /api/tool-tier/:tool_name ── + middleware::auth_middleware, + )); -async fn get_tool_tier( - State(state): State>, - AxumPath(tool_name): AxumPath, -) -> Result, AppError> { - let edda_dir = state.repo_root.join(".edda"); - let config = edda_core::tool_tier::load_tool_tiers_from_dir(&edda_dir)?; - let result = edda_core::tool_tier::resolve_tool_tier(&config, &tool_name); - Ok(Json(result)) -} + // SECURITY: restrict CORS to localhost origins only. edda is a local + // development tool; if remote access is needed, consider adding an + // explicit --cors-origin CLI flag. 
+ let cors = CorsLayer::new() + .allow_origin(AllowOrigin::list([ + format!("http://127.0.0.1:{}", config.port) + .parse() + .expect("valid localhost origin"), + format!("http://localhost:{}", config.port) + .parse() + .expect("valid localhost origin"), + format!("http://[::1]:{}", config.port) + .parse() + .expect("valid localhost origin"), + ])) + .allow_methods(tower_http::cors::Any) + .allow_headers(tower_http::cors::Any); -// ── POST /api/approval/check ── + let app = Router::new() + .merge(public_routes) + .merge(protected_routes) + .layer(cors) + .with_state(state); -#[derive(Deserialize)] -struct ApprovalCheckRequest { - step: String, - #[serde(default)] - bundle_id: Option, - #[serde(default)] - risk_level: Option, - #[serde(default)] - files_changed: Option, - #[serde(default)] - tests_failed: Option, - #[serde(default)] - off_limits_touched: Option, + let addr = format!("{}:{}", config.bind, config.port); + let listener = tokio::net::TcpListener::bind(&addr).await?; + eprintln!("edda HTTP server listening on http://{addr}"); + axum::serve( + listener, + app.into_make_service_with_connect_info::(), + ) + .await?; + Ok(()) } -async fn post_approval_check( - State(state): State>, - Json(body): Json, -) -> Result, AppError> { - let edda_dir = state.repo_root.join(".edda"); - let policy = edda_core::approval::load_approval_policy(&edda_dir)?; - - // Build ReviewBundle from request or from ledger - let bundle = if let Some(bundle_id) = &body.bundle_id { - let ledger = Ledger::open(&state.repo_root).context("POST /api/approval/check")?; - let Some(row) = ledger.get_bundle(bundle_id)? else { - return Err(AppError::NotFound(format!( - "Bundle '{}' not found", - bundle_id - ))); - }; - let Some(event) = ledger.get_event(&row.event_id)? else { - return Err(AppError::NotFound(format!( - "Event for bundle '{}' not found", - bundle_id - ))); - }; - serde_json::from_value::(event.payload)? +/// Build the router (for testing without binding to a port). 
+/// Note: no auth middleware is applied here — tests run as localhost. +#[cfg(test)] +fn router(repo_root: &Path) -> Router { + let store_root = edda_store::store_root(); + let chronicle = if store_root.exists() { + Some(ChronicleContext { + _store_root: store_root, + }) } else { - // Build a synthetic bundle from inline fields - let risk = body - .risk_level - .unwrap_or(edda_core::bundle::RiskLevel::Medium); - let file_count = body.files_changed.unwrap_or(0) as usize; - let failed = body.tests_failed.unwrap_or(0); - let files: Vec = (0..file_count) - .map(|i| edda_core::bundle::FileChange { - path: format!("file_{i}"), - added: 1, - deleted: 0, - }) - .collect(); - edda_core::bundle::ReviewBundle { - bundle_id: "inline".to_string(), - change_summary: edda_core::bundle::ChangeSummary { - files, - total_added: file_count as u32, - total_deleted: 0, - diff_ref: "inline".to_string(), - }, - test_results: edda_core::bundle::TestResults { - passed: 0, - failed, - ignored: 0, - total: failed, - failures: vec![], - command: "inline".to_string(), - }, - risk_assessment: edda_core::bundle::RiskAssessment { - level: risk, - factors: vec![], - }, - suggested_action: edda_core::bundle::SuggestedAction::Review, - suggested_reason: "inline check".to_string(), - } - }; - - let phase_state = edda_core::agent_phase::AgentPhaseState { - phase: edda_core::agent_phase::AgentPhase::Implement, - session_id: "api-check".to_string(), - label: None, - issue: None, - pr: None, - branch: None, - confidence: 1.0, - detected_at: String::new(), - signals: vec![], - }; - - let ctx = edda_core::approval::EvalContext { - bundle: &bundle, - phase: &phase_state, - off_limits_touched: body.off_limits_touched.unwrap_or(false), - consecutive_failures: 0, - current_time: Some(time::OffsetDateTime::now_utc()), + None }; - let decision = policy.evaluate(&body.step, &ctx); - Ok(Json(decision)) + let state = Arc::new(AppState { + repo_root: repo_root.to_path_buf(), + chronicle, + pending_pairings: 
Mutex::new(HashMap::new()), + }); + api::events::routes() + .merge(api::drafts::routes()) + .merge(api::telemetry::routes()) + .merge(api::snapshots::routes()) + .merge(api::analytics::routes()) + .merge(api::metrics::routes()) + .merge(api::dashboard::routes()) + .merge(api::policy::routes()) + .merge(api::briefs::routes()) + .merge(api::stream::routes()) + .merge(api::ingestion::routes()) + .merge(api::auth::routes()) + .merge(sync_routes()) + .with_state(state) } // ── POST /api/sync ── @@ -3582,815 +253,10 @@ async fn post_sync( })) } -// ── GET /dashboard (HTML) ── - -async fn serve_dashboard() -> impl IntoResponse { - axum::response::Html(include_str!("../static/dashboard.html")) -} - -// ── GET /api/briefs ── - -#[derive(Deserialize)] -struct BriefsQuery { - status: Option, - intent: Option, -} - -async fn get_briefs( - State(state): State>, - Query(params): Query, -) -> Result, AppError> { - let ledger = state.open_ledger().context("GET /api/briefs")?; - let briefs = ledger.list_task_briefs(params.status.as_deref(), params.intent.as_deref())?; - - let items: Vec = briefs - .iter() - .map(|b| { - serde_json::json!({ - "task_id": b.task_id, - "intake_event_id": b.intake_event_id, - "title": b.title, - "intent": b.intent.as_str(), - "source_url": b.source_url, - "status": b.status.as_str(), - "branch": b.branch, - "iterations": b.iterations, - "artifacts": serde_json::from_str::(&b.artifacts).unwrap_or_default(), - "decisions": serde_json::from_str::(&b.decisions).unwrap_or_default(), - "last_feedback": b.last_feedback, - "created_at": b.created_at, - "updated_at": b.updated_at, - }) - }) - .collect(); - - Ok(Json( - serde_json::json!({ "briefs": items, "count": items.len() }), - )) -} - -// ── GET /api/briefs/:task_id ── - -async fn get_brief( - State(state): State>, - AxumPath(task_id): AxumPath, -) -> Result, AppError> { - let ledger = state.open_ledger().context("GET /api/briefs/:task_id")?; - let brief = ledger - .get_task_brief(&task_id)? 
- .ok_or_else(|| AppError::NotFound(format!("task brief not found: {task_id}")))?; - - Ok(Json(serde_json::json!({ - "task_id": brief.task_id, - "intake_event_id": brief.intake_event_id, - "title": brief.title, - "intent": brief.intent.as_str(), - "source_url": brief.source_url, - "status": brief.status.as_str(), - "branch": brief.branch, - "iterations": brief.iterations, - "artifacts": serde_json::from_str::(&brief.artifacts).unwrap_or_default(), - "decisions": serde_json::from_str::(&brief.decisions).unwrap_or_default(), - "last_feedback": brief.last_feedback, - "created_at": brief.created_at, - "updated_at": brief.updated_at, - }))) -} - -// ── GET /api/dashboard ── - -#[derive(Deserialize)] -struct DashboardQuery { - #[serde(default = "default_days")] - days: usize, -} - -fn default_days() -> usize { - 7 -} - -#[derive(Serialize)] -struct DashboardResponse { - period: DashboardPeriod, - summary: DashboardSummary, - attention: OverviewResponse, - timeline: Vec, - graph: edda_aggregate::graph::DependencyGraph, - risks: Vec, - project_metrics: Vec, -} - -#[derive(Serialize)] -struct DashboardPeriod { - from: String, - to: String, - days: usize, -} - -#[derive(Serialize)] -struct DashboardSummary { - total_projects: usize, - total_decisions: usize, - total_events: usize, - total_commits: usize, - total_cost_usd: f64, - overall_success_rate: f64, -} - -#[derive(Serialize)] -struct TimelineEntry { - ts: String, - event_type: String, - key: String, - value: String, - reason: String, - project: String, - risk_level: String, - supersedes: Option, -} - -async fn get_dashboard( - State(_state): State>, - Query(params): Query, -) -> Result, AppError> { - let projects = list_projects(); - - let now = time::OffsetDateTime::now_utc(); - let from_date = now - time::Duration::days(params.days as i64); - let to_str = now - .format(&time::format_description::well_known::Rfc3339) - .unwrap_or_default(); - let from_str = from_date - 
.format(&time::format_description::well_known::Rfc3339) - .unwrap_or_default(); - - let range = DateRange { - after: Some(from_str[..10].to_string()), - before: None, - }; - - // Summary - let agg = aggregate_overview(&projects, &range); - - // Decisions + risk scoring - let decisions = aggregate_decisions(&projects); - let now_iso = &to_str; - - let decision_inputs: Vec = decisions - .iter() - .map(|d| DecisionInput { - event_id: d.event_id.clone(), - key: d.key.clone(), - value: d.value.clone(), - project: d.project_name.clone(), - ts: d.ts.clone(), - }) - .collect(); - - // Collect all events for risk computation - // TODO: This event-loading block is duplicated in get_overview; extract into a shared helper in a follow-up. - let mut all_events = Vec::new(); - for entry in &projects { - let root = std::path::Path::new(&entry.path); - if let Ok(ledger) = Ledger::open(root) { - if let Ok(events) = ledger.iter_events() { - all_events.extend(events); - } - } - } - - // Cross-project: decision IDs that appear in provenance of events from OTHER projects - let mut cross_project_ids = std::collections::HashSet::new(); - for entry in &projects { - let root = std::path::Path::new(&entry.path); - if let Ok(ledger) = Ledger::open(root) { - if let Ok(events) = ledger.iter_events() { - for event in &events { - for prov in &event.refs.provenance { - // If this event references a decision from another project - for d in &decisions { - if d.event_id == prov.target && d.project_name != entry.name { - cross_project_ids.insert(d.event_id.clone()); - } - } - } - } - } - } - } - - let risks = compute_decision_risks(&decision_inputs, &all_events, now_iso, &cross_project_ids); - - // Build risk lookup for timeline entries - let risk_map: std::collections::HashMap<&str, &str> = risks - .iter() - .map(|r| (r.event_id.as_str(), r.risk_level.as_str())) - .collect(); - - // Timeline: decisions sorted by timestamp descending - let mut timeline: Vec = decisions - .iter() - .map(|d| { - let 
risk_level = risk_map - .get(d.event_id.as_str()) - .unwrap_or(&"low") - .to_string(); - TimelineEntry { - ts: d.ts.clone().unwrap_or_default(), - event_type: "decision".to_string(), - key: d.key.clone(), - value: d.value.clone(), - reason: d.reason.clone(), - project: d.project_name.clone(), - risk_level, - supersedes: None, // Would need provenance walk - } - }) - .collect(); - timeline.sort_by(|a, b| b.ts.cmp(&a.ts)); - - // Dependency graph - let graph = build_dependency_graph(&projects); - - // Per-project metrics - let project_metrics = per_project_metrics(&projects, &range, params.days); - - // Compute cost totals for summary - let total_cost: f64 = project_metrics.iter().map(|m| m.cost.total_usd).sum(); - let total_steps: u64 = project_metrics.iter().map(|m| m.quality.total_steps).sum(); - let total_success: u64 = project_metrics - .iter() - .map(|m| (m.quality.success_rate * m.quality.total_steps as f64) as u64) - .sum(); - let overall_success_rate = if total_steps > 0 { - total_success as f64 / total_steps as f64 - } else { - 0.0 - }; - - // Attention routing (with cost anomaly detection) - let attention = compute_attention(&risks, &projects, &range, &project_metrics, params.days); - - let period = DashboardPeriod { - from: from_str[..10].to_string(), - to: to_str[..10].to_string(), - days: params.days, - }; - - let summary = DashboardSummary { - total_projects: agg.projects.len(), - total_decisions: agg.total_decisions, - total_events: agg.total_events, - total_commits: agg.total_commits, - total_cost_usd: total_cost, - overall_success_rate, - }; - - Ok(Json(DashboardResponse { - period, - summary, - attention, - timeline, - graph, - risks, - project_metrics, - })) -} - -/// Compute attention routing: red / yellow / green classification. 
-/// -/// Includes cost anomaly detection when `project_metrics` is non-empty: -/// - Yellow: project daily cost > 2x period average -/// - Red: project daily cost > 5x period average -fn compute_attention( - risks: &[DecisionRisk], - projects: &[edda_store::registry::ProjectEntry], - range: &DateRange, - project_metrics: &[ProjectMetrics], - days: usize, -) -> OverviewResponse { - let mut red = Vec::new(); - let mut yellow = Vec::new(); - let mut green = Vec::new(); - - let now = time::OffsetDateTime::now_utc(); - let updated_at = now - .format(&time::format_description::well_known::Rfc3339) - .unwrap_or_else(|_| "unknown".to_string()); - - // Red: high-risk decisions - for r in risks { - if r.risk_level == "high" { - red.push(OverviewRedItem { - project: r.project.clone(), - summary: format!( - "{} = {} (risk {:.0}%)", - r.key, - r.value, - r.risk_score * 100.0 - ), - action: "Review before overriding".to_string(), - blocked_count: 0, - }); - } - } - - // Yellow: medium-risk decisions - for r in risks { - if r.risk_level == "medium" { - yellow.push(OverviewYellowItem { - project: r.project.clone(), - summary: format!( - "{} = {} (risk {:.0}%)", - r.key, - r.value, - r.risk_score * 100.0 - ), - eta: String::new(), - }); - } - } - - // Cost anomaly detection - if days > 0 { - for pm in project_metrics { - let daily_avg = pm.cost.daily_avg_usd; - if daily_avg > 0.0 && pm.cost.last_day_usd > 0.0 { - // Use the actual most-recent-day cost from rollup data - let last_day_cost = pm.cost.last_day_usd; - if last_day_cost > daily_avg * 5.0 { - red.push(OverviewRedItem { - project: pm.name.clone(), - summary: format!( - "Cost spike: ${:.2}/day (5x above ${:.2} avg)", - last_day_cost, daily_avg - ), - action: "Investigate cost increase".to_string(), - blocked_count: 0, - }); - } else if last_day_cost > daily_avg * 2.0 { - yellow.push(OverviewYellowItem { - project: pm.name.clone(), - summary: format!( - "Elevated cost: ${:.2}/day (2x above ${:.2} avg)", - last_day_cost, 
daily_avg - ), - eta: String::new(), - }); - } - } - } - } - - // Red: stale projects (no events in range) - for entry in projects { - let root = std::path::Path::new(&entry.path); - let has_events = Ledger::open(root) - .and_then(|l| l.iter_events()) - .map(|events| events.iter().any(|e| range.matches(&e.ts))) - .unwrap_or(false); - if !has_events { - red.push(OverviewRedItem { - project: entry.name.clone(), - summary: "No activity in period".to_string(), - action: "Check project status".to_string(), - blocked_count: 0, - }); - } - } - - // Green: projects with normal activity - for entry in projects { - let root = std::path::Path::new(&entry.path); - let has_events = Ledger::open(root) - .and_then(|l| l.iter_events()) - .map(|events| events.iter().any(|e| range.matches(&e.ts))) - .unwrap_or(false); - if has_events { - let high_risk = risks - .iter() - .any(|r| r.project == entry.name && r.risk_level == "high"); - if !high_risk { - green.push(OverviewGreenItem { - project: entry.name.clone(), - summary: "Normal activity".to_string(), - }); - } - } - } - - OverviewResponse { - red, - yellow, - green, - updated_at, - } -} - -// ── SSE Event Stream ── - -/// Query parameters for the SSE event stream endpoint. -#[derive(Deserialize)] -struct StreamParams { - /// Comma-separated event types to subscribe to (e.g. "decision,phase_change"). - /// If omitted, all event types are streamed. - types: Option, - /// Resume from this event_id (alternative to `Last-Event-ID` header). - since: Option, -} - -/// Map a ledger event to the SSE event name sent to clients. -/// -/// Decisions are stored as `note` events with a `decision` key in the payload, -/// so we check the payload in addition to the `event_type` field. 
-fn sse_event_name(event: &edda_core::Event) -> &'static str { - match event.event_type.as_str() { - "agent_phase_change" => "phase_change", - "approval_request" => "approval_pending", - "note" if event.payload.get("decision").is_some() => "decision", - _ => "new_event", - } -} - -/// `GET /api/events/stream` — Server-Sent Events endpoint. -/// -/// Streams new ledger events in real time using a poll-based approach -/// (queries SQLite rowid cursor every 2 seconds). -/// -/// Supports: -/// - `?types=decision,phase_change` — filter by SSE event type -/// - `?since=evt_xxx` or `Last-Event-ID` header — resume after disconnect -/// - 30-second keep-alive heartbeat -async fn get_event_stream( - State(state): State>, - Query(params): Query, - headers: HeaderMap, -) -> Result>>, AppError> { - // Determine the resume cursor: query param takes precedence over header. - let since = params.since.or_else(|| { - headers - .get("Last-Event-ID") - .and_then(|v| v.to_str().ok()) - .map(String::from) - }); - - // Parse type filter into a set for O(1) lookups. - let type_filter: Option> = params.types.map(|t| { - t.split(',') - .map(|s| s.trim().to_string()) - .filter(|s| !s.is_empty()) - .collect() - }); - - // Resolve the initial cursor (rowid) from `since` event_id. - let mut cursor: i64 = if let Some(ref event_id) = since { - let ledger = state.open_ledger().context("GET /api/events/stream")?; - ledger.rowid_for_event_id(event_id)?.unwrap_or(0) - } else { - 0 - }; - - let repo_root = state.repo_root.clone(); - - let stream = async_stream::stream! { - let mut interval = tokio::time::interval(Duration::from_secs(2)); - loop { - interval.tick().await; - - let ledger = match edda_ledger::Ledger::open(&repo_root) { - Ok(l) => l, - Err(_) => continue, - }; - - let new_events = match ledger.events_after_rowid(cursor) { - Ok(evts) => evts, - Err(_) => continue, - }; - - if new_events.is_empty() { - continue; - } - - // Update cursor to the latest rowid. 
- if let Some((last_rowid, _)) = new_events.last() { - cursor = *last_rowid; - } - - for (_rowid, event) in new_events { - let sse_name = sse_event_name(&event); - - // Apply type filter if specified. - if let Some(ref filters) = type_filter { - if !filters.iter().any(|f| f == sse_name) { - continue; - } - } - - let event_id = event.event_id.clone(); - let data = serde_json::json!({ - "event_type": sse_name, - "data": serde_json::to_value(&event).unwrap_or_default(), - "ts": &event.ts, - }); - - let sse_event = SseEvent::default() - .event(sse_name) - .id(event_id) - .json_data(data) - .unwrap_or_else(|_| SseEvent::default().comment("serialization error")); - - yield Ok::<_, Infallible>(sse_event); - } - } - }; - - Ok(Sse::new(stream).keep_alive( - KeepAlive::new() - .interval(Duration::from_secs(30)) - .text("ping"), - )) -} - -// ── Ingestion types ── - -#[derive(Deserialize)] -#[serde(rename_all = "camelCase")] -struct EvaluateBody { - event_type: String, - source_layer: String, - #[serde(default)] - source_refs: Vec, - #[serde(default)] - summary: Option, - #[serde(default)] - detail: Option, - #[serde(default)] - tags: Vec, -} - -#[derive(Serialize)] -#[serde(rename_all = "camelCase")] -struct EvaluateResponse { - action: String, - #[serde(skip_serializing_if = "Option::is_none")] - record_id: Option, - #[serde(skip_serializing_if = "Option::is_none")] - suggestion_id: Option, - #[serde(skip_serializing_if = "Option::is_none")] - reason: Option, -} - -#[derive(Deserialize)] -#[serde(rename_all = "camelCase")] -struct ManualIngestBody { - event_type: String, - source_layer: String, - #[serde(default)] - source_refs: Vec, - summary: String, - detail: serde_json::Value, - #[serde(default)] - tags: Vec, -} - -#[derive(Deserialize)] -#[serde(rename_all = "camelCase")] -struct IngestionRecordsQuery { - #[serde(default)] - limit: Option, - #[serde(default)] - source_layer: Option, - #[serde(default)] - trigger_type: Option, -} - -// ── Ingestion handlers ── - -fn 
time_now_rfc3339() -> String { - time::OffsetDateTime::now_utc() - .format(&time::format_description::well_known::Rfc3339) - .expect("RFC3339 formatting should not fail") -} - -// POST /api/ingestion/evaluate -async fn post_ingestion_evaluate( - State(state): State>, - body: Result, JsonRejection>, -) -> Result { - let Json(body) = body.map_err(|e| AppError::Validation(e.body_text()))?; - - let layer: edda_ingestion::SourceLayer = body - .source_layer - .parse() - .map_err(|e: String| AppError::Validation(e))?; - - let result = edda_ingestion::evaluate_trigger(&body.event_type, &body.source_layer); - - match result { - edda_ingestion::TriggerResult::AutoIngest => { - let ledger = state.open_ledger()?; - let _lock = WorkspaceLock::acquire(&ledger.paths)?; - - let summary = body - .summary - .unwrap_or_else(|| format!("{} from {}", body.event_type, body.source_layer)); - let record = edda_ingestion::IngestionRecord { - id: edda_ingestion::IngestionRecord::new_id("prec"), - trigger_type: edda_ingestion::TriggerType::Auto, - event_type: body.event_type, - source_layer: layer, - source_refs: body.source_refs, - summary, - detail: body.detail.unwrap_or(serde_json::json!({})), - tags: body.tags, - created_at: time_now_rfc3339(), - }; - - edda_ingestion::write_ingestion_record(&ledger, &record)?; - - Ok(( - StatusCode::CREATED, - Json(EvaluateResponse { - action: "ingested".to_string(), - record_id: Some(record.id), - suggestion_id: None, - reason: None, - }), - ) - .into_response()) - } - edda_ingestion::TriggerResult::SuggestIngest { reason } => { - let ledger = state.open_ledger()?; - let _lock = WorkspaceLock::acquire(&ledger.paths)?; - - let summary = body - .summary - .unwrap_or_else(|| format!("{} from {}", body.event_type, body.source_layer)); - let suggestion = edda_ingestion::Suggestion { - id: edda_ingestion::Suggestion::new_id(), - event_type: body.event_type, - source_layer: layer, - source_refs: body.source_refs, - summary, - suggested_because: 
reason.clone(), - detail: body.detail.unwrap_or(serde_json::json!({})), - tags: body.tags, - status: edda_ingestion::SuggestionStatus::Pending, - created_at: time_now_rfc3339(), - reviewed_at: None, - }; - - let queue = edda_ingestion::SuggestionQueue::new(&ledger); - let id = queue.enqueue(&suggestion)?; - - Ok(( - StatusCode::OK, - Json(EvaluateResponse { - action: "queued".to_string(), - record_id: None, - suggestion_id: Some(id), - reason: Some(reason), - }), - ) - .into_response()) - } - edda_ingestion::TriggerResult::Skip => Ok(( - StatusCode::OK, - Json(EvaluateResponse { - action: "skipped".to_string(), - record_id: None, - suggestion_id: None, - reason: None, - }), - ) - .into_response()), - } -} - -// POST /api/ingestion/records -async fn post_ingestion_record( - State(state): State>, - body: Result, JsonRejection>, -) -> Result { - let Json(body) = body.map_err(|e| AppError::Validation(e.body_text()))?; - - let layer: edda_ingestion::SourceLayer = body - .source_layer - .parse() - .map_err(|e: String| AppError::Validation(e))?; - - let ledger = state.open_ledger()?; - let _lock = WorkspaceLock::acquire(&ledger.paths)?; - - let record = edda_ingestion::IngestionRecord { - id: edda_ingestion::IngestionRecord::new_id("prec"), - trigger_type: edda_ingestion::TriggerType::Manual, - event_type: body.event_type, - source_layer: layer, - source_refs: body.source_refs, - summary: body.summary, - detail: body.detail, - tags: body.tags, - created_at: time_now_rfc3339(), - }; - - edda_ingestion::write_ingestion_record(&ledger, &record)?; - - Ok(( - StatusCode::CREATED, - Json(serde_json::json!({ "recordId": record.id })), - )) -} - -// GET /api/ingestion/records -async fn get_ingestion_records( - State(state): State>, - Query(params): Query, -) -> Result>, AppError> { - let ledger = state.open_ledger()?; - let events = ledger.iter_events_by_type("ingestion")?; - - let mut records: Vec = events - .into_iter() - .filter_map(|e| serde_json::from_value(e.payload).ok()) 
- .collect(); - - if let Some(ref layer) = params.source_layer { - records.retain(|r| r.source_layer.to_string() == *layer); - } - if let Some(ref tt) = params.trigger_type { - records.retain(|r| { - let label = match r.trigger_type { - edda_ingestion::TriggerType::Auto => "auto", - edda_ingestion::TriggerType::Suggested => "suggested", - edda_ingestion::TriggerType::Manual => "manual", - }; - label == tt.as_str() - }); - } - - let limit = params.limit.unwrap_or(50); - records.truncate(limit); - - Ok(Json(records)) -} - -// GET /api/ingestion/suggestions -async fn get_ingestion_suggestions( - State(state): State>, -) -> Result>, AppError> { - let ledger = state.open_ledger()?; - let queue = edda_ingestion::SuggestionQueue::new(&ledger); - let pending = queue.list_pending()?; - Ok(Json(pending)) -} - -// POST /api/ingestion/suggestions/{id}/accept -async fn post_suggestion_accept( - State(state): State>, - AxumPath(id): AxumPath, -) -> Result, AppError> { - let ledger = state.open_ledger()?; - let _lock = WorkspaceLock::acquire(&ledger.paths)?; - - // Pre-check for proper HTTP error codes - let row = ledger - .get_suggestion(&id)? - .ok_or_else(|| AppError::NotFound(format!("suggestion not found: {id}")))?; - if row.status != "pending" { - return Err(AppError::Conflict(format!( - "suggestion {id} has status '{}', expected 'pending'", - row.status - ))); - } - - let queue = edda_ingestion::SuggestionQueue::new(&ledger); - let record = queue.accept(&id)?; - Ok(Json(record)) -} - -// POST /api/ingestion/suggestions/{id}/reject -async fn post_suggestion_reject( - State(state): State>, - AxumPath(id): AxumPath, -) -> Result, AppError> { - let ledger = state.open_ledger()?; - let _lock = WorkspaceLock::acquire(&ledger.paths)?; - - // Pre-check for proper HTTP error codes - let row = ledger - .get_suggestion(&id)? 
- .ok_or_else(|| AppError::NotFound(format!("suggestion not found: {id}")))?; - if row.status != "pending" { - return Err(AppError::Conflict(format!( - "suggestion {id} has status '{}', expected 'pending'", - row.status - ))); - } - - let queue = edda_ingestion::SuggestionQueue::new(&ledger); - queue.reject(&id)?; - Ok(Json(serde_json::json!({ "ok": true }))) +#[cfg(test)] +fn sync_routes() -> Router> { + use axum::routing::post; + Router::new().route("/api/sync", post(post_sync)) } // ── Tests ── @@ -4399,8 +265,17 @@ async fn post_suggestion_reject( #[allow(clippy::await_holding_lock, clippy::len_zero)] mod tests { use super::*; + use crate::api::dashboard::compute_attention; use axum::body::Body; - use axum::http::Request; + use axum::extract::ConnectInfo; + use axum::http::{Request, StatusCode}; + use edda_aggregate::aggregate::DateRange; + use edda_aggregate::quality::QualityReport; + use edda_core::event::{new_decision_event, new_note_event}; + use edda_core::types::DecisionPayload; + use edda_ledger::device_token::{generate_device_token, hash_token}; + use edda_ledger::Ledger; + use std::time::Duration; use tower::ServiceExt; /// Serialize tests that set EDDA_STORE_ROOT env var. 
@@ -7766,11 +3641,12 @@ actors: pending_pairings: Mutex::new(HashMap::new()), }); - let public_routes = Router::new().route("/api/health", get(health)); + let public_routes = api::events::public_routes(); - let protected_routes = Router::new().route("/api/status", get(get_status)).layer( - middleware::from_fn_with_state(state.clone(), auth_middleware), - ); + let protected_routes = api::events::protected_routes().layer(axum_mw::from_fn_with_state( + state.clone(), + middleware::auth_middleware, + )); Router::new() .merge(public_routes) diff --git a/crates/edda-serve/src/middleware.rs b/crates/edda-serve/src/middleware.rs new file mode 100644 index 0000000..4bd75fe --- /dev/null +++ b/crates/edda-serve/src/middleware.rs @@ -0,0 +1,77 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use anyhow::Context; +use axum::extract::{ConnectInfo, State}; +use axum::http::Request; +use axum::middleware::Next; +use axum::response::Response; +use edda_ledger::device_token::hash_token; + +use crate::error::AppError; +use crate::state::AppState; + +/// Check if a socket address is localhost. +pub(crate) fn is_localhost(addr: &SocketAddr) -> bool { + let ip = addr.ip(); + ip.is_loopback() + || match ip { + std::net::IpAddr::V6(v6) => { + // IPv4-mapped IPv6: ::ffff:127.0.0.1 + if let Some(v4) = v6.to_ipv4_mapped() { + v4.is_loopback() + } else { + false + } + } + _ => false, + } +} + +/// Generate a pairing token (random hex, shorter). +pub(crate) fn generate_pairing_token() -> String { + use rand::Rng; + let mut rng = rand::thread_rng(); + let mut bytes = [0u8; 16]; + rng.fill(&mut bytes); + hex::encode(bytes) +} + +/// Auth middleware: localhost passes through, remote needs Bearer token. 
+pub(crate) async fn auth_middleware( + ConnectInfo(addr): ConnectInfo, + State(state): State>, + req: Request, + next: Next, +) -> Result { + // Localhost: always allowed (backward compat) + if is_localhost(&addr) { + return Ok(next.run(req).await); + } + + // Remote: check Authorization header + let auth_header = req + .headers() + .get("authorization") + .and_then(|v| v.to_str().ok()); + + let raw_token = match auth_header { + Some(h) if h.starts_with("Bearer ") => &h[7..], + _ => { + return Err(AppError::Unauthorized( + "missing or invalid Authorization header".to_string(), + )); + } + }; + + let token_hash = hash_token(raw_token); + let ledger = state.open_ledger().context("auth_middleware")?; + let device = ledger.validate_device_token(&token_hash)?; + + match device { + Some(_) => Ok(next.run(req).await), + None => Err(AppError::Unauthorized( + "invalid or revoked device token".to_string(), + )), + } +} diff --git a/crates/edda-serve/src/state.rs b/crates/edda-serve/src/state.rs new file mode 100644 index 0000000..714042a --- /dev/null +++ b/crates/edda-serve/src/state.rs @@ -0,0 +1,35 @@ +use std::collections::HashMap; +use std::path::PathBuf; +use std::sync::Mutex; + +use edda_ledger::Ledger; + +// ── Config ── + +pub struct ServeConfig { + pub bind: String, + pub port: u16, +} + +// ── App State ── + +pub(crate) struct AppState { + pub(crate) repo_root: PathBuf, + pub(crate) chronicle: Option, + pub(crate) pending_pairings: Mutex>, +} + +pub(crate) struct PairingRequest { + pub(crate) device_name: String, + pub(crate) expires_at: std::time::Instant, +} + +pub(crate) struct ChronicleContext { + pub(crate) _store_root: PathBuf, +} + +impl AppState { + pub(crate) fn open_ledger(&self) -> anyhow::Result { + Ledger::open(&self.repo_root) + } +}