diff --git a/integration-tests/Cargo.lock b/integration-tests/Cargo.lock index 795dff5b0..2327f6726 100644 --- a/integration-tests/Cargo.lock +++ b/integration-tests/Cargo.lock @@ -1878,6 +1878,8 @@ dependencies = [ "pool_sv2", "primitive-types", "rand", + "serde", + "serde_json", "sha2 0.10.9", "stratum-apps", "tokio", diff --git a/integration-tests/Cargo.toml b/integration-tests/Cargo.toml index 479107611..a11984ee3 100644 --- a/integration-tests/Cargo.toml +++ b/integration-tests/Cargo.toml @@ -28,6 +28,8 @@ tracing = { version = "0.1.41", default-features = false } tracing-subscriber = { version = "0.3.19", default-features = false } hex = "0.4.3" clap = { version = "^4.5.4", features = ["derive"] } +serde_json = "1" +serde = { version = "1", features = ["derive"] } # Direct dependencies kept only for the embedded `mining_device` module. # Remove this block when removing: diff --git a/integration-tests/lib/prometheus_metrics_assertions.rs b/integration-tests/lib/prometheus_metrics_assertions.rs index bfeb5364d..1600e7597 100644 --- a/integration-tests/lib/prometheus_metrics_assertions.rs +++ b/integration-tests/lib/prometheus_metrics_assertions.rs @@ -1,7 +1,104 @@ //! Helpers for querying and asserting on Prometheus metrics and JSON API endpoints //! exposed by SV2 components during integration tests. 
+use serde::Deserialize;
 use std::net::SocketAddr;
+use std::time::Duration;
+
+// ── Typed response structs for JSON API endpoints ─────────────────────────────
+
+/// Response from `/api/v1/global`
+#[derive(Debug, Deserialize)]
+pub struct GlobalResponse {
+    pub uptime_secs: u64,
+    pub server: Option<ServerSummary>,
+    pub sv2_clients: Option<Sv2ClientsSummary>,
+    pub sv1_clients: Option<Sv1ClientsSummary>,
+}
+
+/// Server summary in global response
+#[derive(Debug, Deserialize)]
+pub struct ServerSummary {
+    pub extended_channels: u64,
+    pub standard_channels: u64,
+}
+
+/// SV2 clients summary in global response
+#[derive(Debug, Deserialize)]
+pub struct Sv2ClientsSummary {
+    pub total_clients: u64,
+}
+
+/// SV1 clients summary in global response
+#[derive(Debug, Deserialize)]
+pub struct Sv1ClientsSummary {
+    pub total_clients: u64,
+}
+
+/// Response from `/api/v1/server`
+#[derive(Debug, Deserialize)]
+pub struct ServerResponse {
+    pub extended_channels_count: usize,
+}
+
+/// Response from `/api/v1/server/channels`
+#[derive(Debug, Deserialize)]
+pub struct ServerChannelsResponse {
+    pub total_extended: u64,
+    pub extended_channels: Vec<ServerExtendedChannel>,
+}
+
+/// Extended channel info from server perspective
+#[derive(Debug, Deserialize)]
+pub struct ServerExtendedChannel {
+    pub shares_acknowledged: Option<u64>,
+}
+
+/// Generic paginated response
+#[derive(Debug, Deserialize)]
+pub struct PaginatedResponse<T> {
+    pub total: u64,
+    pub items: Vec<T>,
+}
+
+/// Client metadata in paginated clients list
+#[derive(Debug, Deserialize)]
+pub struct ClientMetadata {
+    pub client_id: u64,
+}
+
+/// Response from `/api/v1/clients/{id}/channels`
+#[derive(Debug, Deserialize)]
+pub struct ClientChannelsResponse {
+    pub client_id: u64,
+    pub total_extended: u64,
+    pub total_standard: u64,
+}
+
+/// SV1 client info
+#[derive(Debug, Deserialize)]
+pub struct Sv1Client {
+    pub client_id: u64,
+    pub authorized_worker_name: String,
+}
+
+/// Error response body
+#[derive(Debug, Deserialize)]
+pub struct ErrorResponse {
+    pub error: String,
+}
+
+/// Root endpoint response
+#[derive(Debug, Deserialize)]
+pub struct RootResponse {
+    pub service: String,
+    pub endpoints: serde_json::Value,
+}
+
+/// Default timeout used when polling for eventually-consistent data.
+/// Needs to be generous enough for the monitoring snapshot cache (1s refresh) to populate
+/// under CI load, where components may take several seconds to complete handshakes.
+pub const POLL_TIMEOUT: Duration = Duration::from_secs(15);
 
 /// Fetch the raw Prometheus text-format metrics from a component's `/metrics` endpoint.
 /// Uses `spawn_blocking` to avoid blocking the tokio runtime with synchronous HTTP calls.
@@ -27,6 +124,229 @@ pub async fn fetch_api(monitoring_addr: SocketAddr, path: &str) -> String {
         .expect("spawn_blocking for fetch_api panicked")
 }
 
+/// Fetch a JSON API endpoint and parse the response into a `serde_json::Value`.
+pub async fn fetch_api_json(monitoring_addr: SocketAddr, path: &str) -> serde_json::Value {
+    let body = fetch_api(monitoring_addr, path).await;
+    serde_json::from_str(&body).unwrap_or_else(|e| {
+        panic!(
+            "Failed to parse JSON from {} response: {}\nBody: {}",
+            path, e, body
+        )
+    })
+}
+
+/// Fetch a JSON API endpoint and parse the response into a typed struct.
+pub async fn fetch_api_typed<T: serde::de::DeserializeOwned>(
+    monitoring_addr: SocketAddr,
+    path: &str,
+) -> T {
+    let body = fetch_api(monitoring_addr, path).await;
+    serde_json::from_str(&body).unwrap_or_else(|e| {
+        panic!(
+            "Failed to parse JSON from {} into {}: {}\nBody: {}",
+            path,
+            std::any::type_name::<T>(),
+            e,
+            body
+        )
+    })
+}
+
+/// Fetch a JSON API endpoint returning both the HTTP status code and parsed JSON body.
+/// Unlike `fetch_api_json`, this does **not** panic on non-2xx responses, so it can be
+/// used to test error endpoints (e.g. 404).
+pub async fn fetch_api_with_status(
+    monitoring_addr: SocketAddr,
+    path: &str,
+) -> (i32, serde_json::Value) {
+    let url = format!("http://{}{}", monitoring_addr, path);
+    tokio::task::spawn_blocking(move || {
+        let (status, bytes) = crate::utils::http::make_get_request_with_status(&url, 5);
+        let body = String::from_utf8(bytes).expect("api response should be valid UTF-8");
+        let json: serde_json::Value = serde_json::from_str(&body).unwrap_or_else(|e| {
+            panic!(
+                "Failed to parse JSON from {} (status {}): {}\nBody: {}",
+                url, status, e, body
+            )
+        });
+        (status, json)
+    })
+    .await
+    .expect("spawn_blocking for fetch_api_with_status panicked")
+}
+
+/// Poll a JSON API endpoint until a numeric field at `json_pointer` (RFC 6901, e.g.
+/// `"/sv2_clients/total_clients"`) reaches `>= min`. Returns the full JSON value once
+/// satisfied. Panics if the condition is not met within `timeout`.
+///
+/// This is the JSON equivalent of `poll_until_metric_gte` — use it for endpoints whose
+/// data only appears after the monitoring snapshot cache has refreshed.
+pub async fn poll_until_api_field_gte(
+    monitoring_addr: SocketAddr,
+    path: &str,
+    json_pointer: &str,
+    min: f64,
+    timeout: std::time::Duration,
+) -> serde_json::Value {
+    let deadline = tokio::time::Instant::now() + timeout;
+    loop {
+        // Use fetch_api_with_status so that transient non-2xx responses (e.g. 404
+        // before the snapshot cache has populated) are retried instead of panicking.
+        let (status, json) = fetch_api_with_status(monitoring_addr, path).await;
+        if (200..300).contains(&status) {
+            if let Some(val) = json.pointer(json_pointer) {
+                let num = val.as_f64().unwrap_or(0.0);
+                if num >= min {
+                    return json;
+                }
+            }
+        }
+        if tokio::time::Instant::now() >= deadline {
+            panic!(
+                "JSON field '{}' at {} never reached >= {} within {:?}. Last status: {}. Last response:\n{}",
+                json_pointer,
+                path,
+                min,
+                timeout,
+                status,
+                serde_json::to_string_pretty(&json).unwrap_or_default()
+            );
+        }
+        tokio::time::sleep(std::time::Duration::from_millis(500)).await;
+    }
+}
+
+/// Internal: poll `path` until `condition` is met, returning the parsed `T`.
+/// Retries on non-2xx responses (endpoint may not be ready yet).
+async fn poll_until<T, F>(
+    monitoring_addr: SocketAddr,
+    path: &'static str,
+    timeout: Duration,
+    condition: F,
+    timeout_msg: &'static str,
+) -> T
+where
+    T: serde::de::DeserializeOwned,
+    F: Fn(&T) -> bool,
+{
+    let deadline = tokio::time::Instant::now() + timeout;
+    loop {
+        let (status, body) = {
+            let url = format!("http://{}{}", monitoring_addr, path);
+            tokio::task::spawn_blocking(move || {
+                crate::utils::http::make_get_request_with_status(&url, 5)
+            })
+            .await
+            .expect("spawn_blocking panicked")
+        };
+        if (200..300).contains(&status) {
+            let body_str = String::from_utf8(body).expect("response should be valid UTF-8");
+            if let Ok(resp) = serde_json::from_str::<T>(&body_str) {
+                if condition(&resp) {
+                    return resp;
+                }
+            }
+        }
+        if tokio::time::Instant::now() >= deadline {
+            panic!("{} within {:?}", timeout_msg, timeout);
+        }
+        tokio::time::sleep(Duration::from_millis(500)).await;
+    }
+}
+
+/// Poll the `/api/v1/global` endpoint until `sv2_clients.total_clients >= min`.
+/// Returns the typed `GlobalResponse` once satisfied.
+pub async fn poll_until_global_sv2_clients_gte(
+    monitoring_addr: SocketAddr,
+    min: u64,
+    timeout: Duration,
+) -> GlobalResponse {
+    poll_until(
+        monitoring_addr,
+        "/api/v1/global",
+        timeout,
+        |r: &GlobalResponse| {
+            r.sv2_clients
+                .as_ref()
+                .is_some_and(|c| c.total_clients >= min)
+        },
+        "GlobalResponse sv2_clients.total_clients never reached >= expected",
+    )
+    .await
+}
+
+/// Poll the `/api/v1/global` endpoint until `sv1_clients.total_clients >= min`.
+/// Returns the typed `GlobalResponse` once satisfied.
+pub async fn poll_until_global_sv1_clients_gte(
+    monitoring_addr: SocketAddr,
+    min: u64,
+    timeout: Duration,
+) -> GlobalResponse {
+    poll_until(
+        monitoring_addr,
+        "/api/v1/global",
+        timeout,
+        |r: &GlobalResponse| {
+            r.sv1_clients
+                .as_ref()
+                .is_some_and(|c| c.total_clients >= min)
+        },
+        "GlobalResponse sv1_clients.total_clients never reached >= expected",
+    )
+    .await
+}
+
+/// Poll the `/api/v1/clients` endpoint until `total >= min`.
+/// Returns the typed `PaginatedResponse` once satisfied.
+pub async fn poll_until_clients_total_gte(
+    monitoring_addr: SocketAddr,
+    min: u64,
+    timeout: Duration,
+) -> PaginatedResponse<ClientMetadata> {
+    poll_until(
+        monitoring_addr,
+        "/api/v1/clients",
+        timeout,
+        |r: &PaginatedResponse<ClientMetadata>| r.total >= min,
+        "PaginatedResponse total never reached >= expected",
+    )
+    .await
+}
+
+/// Poll the `/api/v1/sv1/clients` endpoint until `total >= min`.
+/// Returns the typed `PaginatedResponse` once satisfied.
+pub async fn poll_until_sv1_clients_total_gte(
+    monitoring_addr: SocketAddr,
+    min: u64,
+    timeout: Duration,
+) -> PaginatedResponse<Sv1Client> {
+    poll_until(
+        monitoring_addr,
+        "/api/v1/sv1/clients",
+        timeout,
+        |r: &PaginatedResponse<Sv1Client>| r.total >= min,
+        "PaginatedResponse total never reached >= expected",
+    )
+    .await
+}
+
+/// Poll the `/api/v1/server` endpoint until `extended_channels_count >= min`.
+/// Returns the typed `ServerResponse` once satisfied.
+pub async fn poll_until_server_channels_gte(
+    monitoring_addr: SocketAddr,
+    min: usize,
+    timeout: Duration,
+) -> ServerResponse {
+    poll_until(
+        monitoring_addr,
+        "/api/v1/server",
+        timeout,
+        |r: &ServerResponse| r.extended_channels_count >= min,
+        "ServerResponse extended_channels_count never reached >= expected",
+    )
+    .await
+}
+
 /// Parse a specific metric value from Prometheus text format.
 /// Returns `None` if the metric line is not found.
 ///
@@ -53,6 +373,7 @@ pub(crate) fn parse_metric_value(metrics_text: &str, metric_name: &str) -> Optio
 }
 
 /// Assert that a metric is present and its value satisfies the given predicate.
+#[track_caller]
 pub(crate) fn assert_metric<F: Fn(f64) -> bool>(
     metrics_text: &str,
     metric_name: &str,
@@ -80,6 +401,7 @@ pub(crate) fn assert_metric<F: Fn(f64) -> bool>(
 }
 
 /// Assert that a metric is present with a value >= the given minimum.
+#[track_caller]
 pub fn assert_metric_gte(metrics_text: &str, metric_name: &str, min: f64) {
     assert_metric(
         metrics_text,
@@ -90,6 +412,7 @@ pub fn assert_metric_gte(metrics_text: &str, metric_name: &str, min: f64) {
 }
 
 /// Assert that a metric is present with the exact given value.
+#[track_caller]
 pub fn assert_metric_eq(metrics_text: &str, metric_name: &str, expected: f64) {
     assert_metric(
         metrics_text,
@@ -100,6 +423,7 @@ pub fn assert_metric_eq(metrics_text: &str, metric_name: &str, expected: f64) {
 }
 
 /// Assert that a metric name does NOT appear in the metrics output at all.
+#[track_caller]
 pub fn assert_metric_not_present(metrics_text: &str, metric_name: &str) {
     for line in metrics_text.lines() {
         if line.starts_with('#') {
@@ -118,6 +442,7 @@ pub fn assert_metric_not_present(metrics_text: &str, metric_name: &str) {
 }
 
 /// Assert that a metric name appears at least once in the metrics output (with any label/value).
+#[track_caller]
 pub fn assert_metric_present(metrics_text: &str, metric_name: &str) {
     for line in metrics_text.lines() {
         if line.starts_with('#') {
@@ -188,17 +513,18 @@ pub async fn poll_until_metric_gte(
     }
 }
 
-/// Assert that the `/api/v1/health` endpoint returns a response containing `"status":"ok"`.
+/// Assert that the `/api/v1/health` endpoint returns `{"status":"ok"}`.
pub async fn assert_api_health(monitoring_addr: SocketAddr) { - let body = fetch_api(monitoring_addr, "/api/v1/health").await; - assert!( - body.contains("\"status\":\"ok\""), + let health: serde_json::Value = fetch_api_typed(monitoring_addr, "/api/v1/health").await; + assert_eq!( + health["status"], "ok", "Health endpoint should return ok status, got: {}", - body + health ); } /// Assert that the uptime metric is present and positive. +#[track_caller] pub fn assert_uptime(metrics_text: &str) { assert_metric( metrics_text, diff --git a/integration-tests/lib/utils.rs b/integration-tests/lib/utils.rs index 6fc81d3e0..a2e95e26f 100644 --- a/integration-tests/lib/utils.rs +++ b/integration-tests/lib/utils.rs @@ -407,6 +407,43 @@ pub fn into_static(m: AnyMessage<'_>) -> AnyMessage<'static> { } pub mod http { + /// Make a GET request that returns both the HTTP status code and the response body. + /// Unlike `make_get_request`, this does NOT panic on non-2xx status codes (e.g. 404), + /// making it suitable for testing API error responses. + /// Only retries on 5xx errors or connection failures. 
+    pub fn make_get_request_with_status(url: &str, retries: usize) -> (i32, Vec<u8>) {
+        for attempt in 1..=retries {
+            let response = minreq::get(url).send();
+            match response {
+                Ok(res) => {
+                    let status_code = res.status_code;
+                    if (500..600).contains(&status_code) {
+                        eprintln!(
+                            "Attempt {attempt}: URL {url} returned a server error code {status_code}"
+                        );
+                    } else {
+                        return (status_code, res.as_bytes().to_vec());
+                    }
+                }
+                Err(err) => {
+                    eprintln!(
+                        "Attempt {}: Failed to fetch URL {}: {:?}",
+                        attempt,
+                        url,
+                        err
+                    );
+                }
+            }
+
+            if attempt < retries {
+                let delay = 1u64 << (attempt - 1);
+                eprintln!("Retrying in {delay} seconds (exponential backoff)...");
+                std::thread::sleep(std::time::Duration::from_secs(delay));
+            }
+        }
+        panic!("Cannot reach URL {url} after {retries} attempts");
+    }
+
     pub fn make_get_request(download_url: &str, retries: usize) -> Vec<u8> {
         for attempt in 1..=retries {
             let response = minreq::get(download_url).send();
diff --git a/integration-tests/tests/monitoring_integration.rs b/integration-tests/tests/monitoring_integration.rs
index 1aca9c4c4..0241ecbdb 100644
--- a/integration-tests/tests/monitoring_integration.rs
+++ b/integration-tests/tests/monitoring_integration.rs
@@ -9,10 +9,7 @@ use integration_tests_sv2::{
 };
 use stratum_apps::stratum_core::mining_sv2::*;
 
-// ---------------------------------------------------------------------------
-// 1. Pool + SV2 Mining Device (standard channel) Pool role exposes: client metrics (connections,
-// channels, shares, hashrate) Pool has NO upstream, so server metrics should be absent.
-// ---------------------------------------------------------------------------
+// Pool + SV2 Mining Device: Pool exposes client metrics, no server metrics (no upstream).
#[tokio::test] async fn pool_monitoring_with_sv2_mining_device() { start_tracing(); @@ -46,7 +43,7 @@ async fn pool_monitoring_with_sv2_mining_device() { pool_mon, "sv2_client_shares_accepted_total", 1.0, - std::time::Duration::from_secs(10), + POLL_TIMEOUT, ) .await; assert_uptime(&pool_metrics); @@ -61,11 +58,7 @@ async fn pool_monitoring_with_sv2_mining_device() { shutdown_all!(pool); } -// --------------------------------------------------------------------------- -// 2. Pool + tProxy + SV1 miner (non-aggregated) Pool: client metrics (1 SV2 client = tProxy, -// extended channel, shares) tProxy: server metrics (upstream channel to pool), SV1 metrics (1 -// SV1 client) tProxy has no SV2 downstreams so sv2_clients_total should be absent -// --------------------------------------------------------------------------- +// Pool + tProxy + SV1 miner: Pool sees 1 SV2 client, tProxy sees 1 SV1 client and 1 upstream channel. #[tokio::test] async fn pool_and_tproxy_monitoring_with_sv1_miner() { start_tracing(); @@ -92,7 +85,7 @@ async fn pool_and_tproxy_monitoring_with_sv1_miner() { pool_mon, "sv2_client_shares_accepted_total", 1.0, - std::time::Duration::from_secs(10), + POLL_TIMEOUT, ) .await; assert_uptime(&pool_metrics); @@ -107,7 +100,7 @@ async fn pool_and_tproxy_monitoring_with_sv1_miner() { tproxy_mon, "sv2_server_shares_accepted_total", 1.0, - std::time::Duration::from_secs(10), + POLL_TIMEOUT, ) .await; assert_uptime(&tproxy_metrics); @@ -125,10 +118,7 @@ async fn pool_and_tproxy_monitoring_with_sv1_miner() { shutdown_all!(pool, tproxy); } -// --------------------------------------------------------------------------- -// 3. Pool + JDC + tProxy + 2 SV1 miners (aggregated) tProxy aggregated: 2 SV1 clients, 1 upstream -// extended channel Pool: 1 SV2 client (JDC), shares accepted -// --------------------------------------------------------------------------- +// Pool + JDC + tProxy + 2 SV1 miners: aggregated topology with multiple SV1 clients. 
#[tokio::test] async fn jd_aggregated_topology_monitoring() { start_tracing(); @@ -175,7 +165,7 @@ async fn jd_aggregated_topology_monitoring() { pool_mon, "sv2_client_shares_accepted_total", 1.0, - std::time::Duration::from_secs(10), + POLL_TIMEOUT, ) .await; assert_uptime(&pool_metrics); @@ -198,10 +188,7 @@ async fn jd_aggregated_topology_monitoring() { shutdown_all!(pool, jdc, tproxy); } -// --------------------------------------------------------------------------- -// 4. Block found detection via metrics Uses JDC topology (which finds regtest blocks). After a -// block is found, the pool's sv2_client_blocks_found_total metric should be >= 1. -// --------------------------------------------------------------------------- +// Block found detection: JDC topology finds regtest blocks, pool metrics reflect it. #[tokio::test] async fn block_found_detected_in_pool_metrics() { use stratum_apps::stratum_core::template_distribution_sv2::*; @@ -233,15 +220,532 @@ async fn block_found_detected_in_pool_metrics() { // Poll until block found metric appears in monitoring cache let pool_mon = pool_monitoring.expect("pool monitoring should be enabled"); - let pool_metrics = poll_until_metric_gte( - pool_mon, - "sv2_client_blocks_found_total", - 1.0, - std::time::Duration::from_secs(10), - ) - .await; + let pool_metrics = + poll_until_metric_gte(pool_mon, "sv2_client_blocks_found_total", 1.0, POLL_TIMEOUT).await; assert_uptime(&pool_metrics); assert_metric_eq(&pool_metrics, "sv2_clients_total", 1.0); shutdown_all!(pool, jdc, tproxy); } + +// Pool JSON API: root endpoint lists available APIs. 
+#[tokio::test] +async fn pool_api_root_endpoint() { + start_tracing(); + let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); + let (pool, _pool_addr, pool_monitoring) = + start_pool(sv2_tp_config(tp_addr), vec![], vec![], true).await; + + let pool_mon = pool_monitoring.expect("pool monitoring should be enabled"); + + // Inline assertion: root endpoint returns expected API listing structure + let json = fetch_api_json(pool_mon, "/").await; + assert_eq!( + json["service"], "SRI Monitoring API", + "Root endpoint should return service name, got: {}", + json + ); + assert!( + json["endpoints"].is_object(), + "Root endpoint should list endpoints, got: {}", + json + ); + + pool.shutdown().await; +} + +// Pool JSON API: `/api/v1/global` - Pool has SV2 clients but no upstream server. +#[tokio::test] +async fn pool_api_global_endpoint() { + start_tracing(); + let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); + let (pool, pool_addr, pool_monitoring) = + start_pool(sv2_tp_config(tp_addr), vec![], vec![], true).await; + let (sniffer, sniffer_addr) = start_sniffer("A", pool_addr, false, vec![], None); + start_mining_device_sv2(sniffer_addr, None, None, None, 1, None, true); + + sniffer + .wait_for_message_type( + MessageDirection::ToUpstream, + MESSAGE_TYPE_SUBMIT_SHARES_STANDARD, + ) + .await; + sniffer + .wait_for_message_type( + MessageDirection::ToDownstream, + MESSAGE_TYPE_SUBMIT_SHARES_SUCCESS, + ) + .await; + + let pool_mon = pool_monitoring.expect("pool monitoring should be enabled"); + + // Poll until SV2 clients appear in the global endpoint (typed) + let global = poll_until_global_sv2_clients_gte(pool_mon, 1, POLL_TIMEOUT).await; + + // Pool has no upstream server + assert!( + global.server.is_none(), + "Pool /api/v1/global should have null server (no upstream)" + ); + // Pool should report SV2 clients + assert_eq!( + global.sv2_clients.as_ref().unwrap().total_clients, + 1, + "Pool should see 1 SV2 client" + ); + // Pool 
has no SV1 clients + assert!( + global.sv1_clients.is_none(), + "Pool /api/v1/global should have null sv1_clients" + ); + assert!(global.uptime_secs > 0); + + pool.shutdown().await; +} + +// Pool JSON API: `/api/v1/server` returns 404 (pool has no upstream). +#[tokio::test] +async fn pool_api_server_returns_not_found() { + start_tracing(); + let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); + let (pool, _pool_addr, pool_monitoring) = + start_pool(sv2_tp_config(tp_addr), vec![], vec![], true).await; + + let pool_mon = pool_monitoring.expect("pool monitoring should be enabled"); + + // Inline assertion: endpoint returns HTTP 404 with JSON error body + let (status, json) = fetch_api_with_status(pool_mon, "/api/v1/server").await; + assert_eq!( + status, 404, + "/api/v1/server should return HTTP 404, got {} with body: {}", + status, json + ); + assert!( + json["error"].is_string(), + "/api/v1/server should return JSON with 'error' field, got: {}", + json + ); + + pool.shutdown().await; +} + +// Pool JSON API: `/api/v1/clients` paginated list. 
+#[tokio::test] +async fn pool_api_clients_endpoint() { + start_tracing(); + let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); + let (pool, pool_addr, pool_monitoring) = + start_pool(sv2_tp_config(tp_addr), vec![], vec![], true).await; + let (sniffer, sniffer_addr) = start_sniffer("A", pool_addr, false, vec![], None); + start_mining_device_sv2(sniffer_addr, None, None, None, 1, None, true); + + sniffer + .wait_for_message_type( + MessageDirection::ToUpstream, + MESSAGE_TYPE_SUBMIT_SHARES_STANDARD, + ) + .await; + sniffer + .wait_for_message_type( + MessageDirection::ToDownstream, + MESSAGE_TYPE_SUBMIT_SHARES_SUCCESS, + ) + .await; + + let pool_mon = pool_monitoring.expect("pool monitoring should be enabled"); + + // Poll until clients list is populated (typed) + let clients = poll_until_clients_total_gte(pool_mon, 1, POLL_TIMEOUT).await; + + assert_eq!(clients.total, 1, "Pool should have 1 SV2 client"); + assert_eq!(clients.items.len(), 1); + assert!( + clients.items[0].client_id > 0, + "Client should have a client_id" + ); + + pool.shutdown().await; +} + +// Pool JSON API: `/api/v1/clients/{id}` and non-existent client returns 404. 
+#[tokio::test] +async fn pool_api_client_by_id_endpoint() { + start_tracing(); + let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); + let (pool, pool_addr, pool_monitoring) = + start_pool(sv2_tp_config(tp_addr), vec![], vec![], true).await; + let (sniffer, sniffer_addr) = start_sniffer("A", pool_addr, false, vec![], None); + start_mining_device_sv2(sniffer_addr, None, None, None, 1, None, true); + + sniffer + .wait_for_message_type( + MessageDirection::ToUpstream, + MESSAGE_TYPE_SUBMIT_SHARES_STANDARD, + ) + .await; + sniffer + .wait_for_message_type( + MessageDirection::ToDownstream, + MESSAGE_TYPE_SUBMIT_SHARES_SUCCESS, + ) + .await; + + let pool_mon = pool_monitoring.expect("pool monitoring should be enabled"); + + // Discover the client_id (typed) + let clients = poll_until_clients_total_gte(pool_mon, 1, POLL_TIMEOUT).await; + let client_id = clients.items[0].client_id; + + // Fetch single client (typed) + let client: ClientMetadata = + fetch_api_typed(pool_mon, &format!("/api/v1/clients/{}", client_id)).await; + assert_eq!(client.client_id, client_id); + + // Non-existent client should return 404 (typed) + let (status, _) = fetch_api_with_status(pool_mon, "/api/v1/clients/99999").await; + assert_eq!(status, 404, "Non-existent client should return 404"); + + pool.shutdown().await; +} + +// Pool JSON API: `/api/v1/clients/{id}/channels` paginated channels. 
+#[tokio::test] +async fn pool_api_client_channels_endpoint() { + start_tracing(); + let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); + let (pool, pool_addr, pool_monitoring) = + start_pool(sv2_tp_config(tp_addr), vec![], vec![], true).await; + let (sniffer, sniffer_addr) = start_sniffer("A", pool_addr, false, vec![], None); + start_mining_device_sv2(sniffer_addr, None, None, None, 1, None, true); + + sniffer + .wait_for_message_type( + MessageDirection::ToUpstream, + MESSAGE_TYPE_SUBMIT_SHARES_STANDARD, + ) + .await; + sniffer + .wait_for_message_type( + MessageDirection::ToDownstream, + MESSAGE_TYPE_SUBMIT_SHARES_SUCCESS, + ) + .await; + + let pool_mon = pool_monitoring.expect("pool monitoring should be enabled"); + + // Discover the client_id (typed) + let clients = poll_until_clients_total_gte(pool_mon, 1, POLL_TIMEOUT).await; + let client_id = clients.items[0].client_id; + + // Fetch channels for this client (typed) + let channels: ClientChannelsResponse = + fetch_api_typed(pool_mon, &format!("/api/v1/clients/{}/channels", client_id)).await; + assert_eq!(channels.client_id, client_id); + // Mining device opens a standard channel + assert!( + channels.total_standard + channels.total_extended >= 1, + "Client should have at least 1 channel, got standard={}, extended={}", + channels.total_standard, + channels.total_extended + ); + + // Non-existent client channels should return 404 + let (status, _) = fetch_api_with_status(pool_mon, "/api/v1/clients/99999/channels").await; + assert_eq!( + status, 404, + "Non-existent client channels should return 404" + ); + + pool.shutdown().await; +} + +// Pool JSON API: `/api/v1/sv1/clients` returns 404 (pool has no SV1). 
+#[tokio::test] +async fn pool_api_sv1_clients_returns_not_found() { + start_tracing(); + let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); + let (pool, _pool_addr, pool_monitoring) = + start_pool(sv2_tp_config(tp_addr), vec![], vec![], true).await; + + let pool_mon = pool_monitoring.expect("pool monitoring should be enabled"); + + // Inline assertion: endpoint returns HTTP 404 with JSON error body + let (status, json) = fetch_api_with_status(pool_mon, "/api/v1/sv1/clients").await; + assert_eq!(status, 404, "/api/v1/sv1/clients should return 404"); + assert!(json["error"].is_string(), "Should have error field"); + + pool.shutdown().await; +} + +// Pool JSON API: `/api/v1/server/channels` returns 404 (pool has no upstream). +#[tokio::test] +async fn pool_api_server_channels_returns_not_found() { + start_tracing(); + let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); + let (pool, _pool_addr, pool_monitoring) = + start_pool(sv2_tp_config(tp_addr), vec![], vec![], true).await; + + let pool_mon = pool_monitoring.expect("pool monitoring should be enabled"); + + // Inline assertion: endpoint returns HTTP 404 with JSON error body + let (status, json) = fetch_api_with_status(pool_mon, "/api/v1/server/channels").await; + assert_eq!(status, 404, "/api/v1/server/channels should return 404"); + assert!(json["error"].is_string(), "Should have error field"); + + pool.shutdown().await; +} + +// tProxy JSON API: `/api/v1/global` with SV1 and server data. 
+#[tokio::test] +async fn tproxy_api_global_endpoint() { + start_tracing(); + let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); + let (pool, pool_addr, _pool_monitoring) = + start_pool(sv2_tp_config(tp_addr), vec![], vec![], false).await; + let (sniffer, sniffer_addr) = start_sniffer("0", pool_addr, false, vec![], None); + let (tproxy, tproxy_addr, tproxy_monitoring) = + start_sv2_translator(&[sniffer_addr], false, vec![], vec![], None, true).await; + let (_minerd_process, _minerd_addr) = start_minerd(tproxy_addr, None, None, false).await; + + sniffer + .wait_for_message_type( + MessageDirection::ToUpstream, + MESSAGE_TYPE_SUBMIT_SHARES_EXTENDED, + ) + .await; + + let tproxy_mon = tproxy_monitoring.expect("tproxy monitoring should be enabled"); + + // Poll until SV1 clients appear in the global endpoint (typed) + let global = poll_until_global_sv1_clients_gte(tproxy_mon, 1, POLL_TIMEOUT).await; + + // tProxy has upstream server + assert!( + global.server.is_some(), + "tProxy /api/v1/global should have server data" + ); + assert!( + global.server.as_ref().unwrap().extended_channels >= 1, + "tProxy should have at least 1 extended upstream channel" + ); + // tProxy has SV1 clients + assert!( + global.sv1_clients.as_ref().unwrap().total_clients >= 1, + "tProxy should see at least 1 SV1 client" + ); + // tProxy has no SV2 downstreams + assert!( + global.sv2_clients.is_none(), + "tProxy should have null sv2_clients" + ); + + shutdown_all!(tproxy, pool); +} + +// tProxy JSON API: `/api/v1/server` upstream connection info. 
+#[tokio::test] +async fn tproxy_api_server_endpoint() { + start_tracing(); + let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); + let (pool, pool_addr, _pool_monitoring) = + start_pool(sv2_tp_config(tp_addr), vec![], vec![], false).await; + let (sniffer, sniffer_addr) = start_sniffer("0", pool_addr, false, vec![], None); + let (tproxy, tproxy_addr, tproxy_monitoring) = + start_sv2_translator(&[sniffer_addr], false, vec![], vec![], None, true).await; + let (_minerd_process, _minerd_addr) = start_minerd(tproxy_addr, None, None, false).await; + + sniffer + .wait_for_message_type( + MessageDirection::ToUpstream, + MESSAGE_TYPE_SUBMIT_SHARES_EXTENDED, + ) + .await; + + let tproxy_mon = tproxy_monitoring.expect("tproxy monitoring should be enabled"); + + // Poll until the server endpoint reports at least 1 extended channel (typed) + let server = poll_until_server_channels_gte(tproxy_mon, 1, POLL_TIMEOUT).await; + + assert!( + server.extended_channels_count >= 1, + "tProxy should have at least 1 extended channel upstream" + ); + + shutdown_all!(tproxy, pool); +} + +// tProxy JSON API: `/api/v1/server/channels` upstream channel details. 
+#[tokio::test] +async fn tproxy_api_server_channels_endpoint() { + start_tracing(); + let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); + let (pool, pool_addr, _pool_monitoring) = + start_pool(sv2_tp_config(tp_addr), vec![], vec![], false).await; + let (sniffer, sniffer_addr) = start_sniffer("0", pool_addr, false, vec![], None); + let (tproxy, tproxy_addr, tproxy_monitoring) = + start_sv2_translator(&[sniffer_addr], false, vec![], vec![], None, true).await; + let (_minerd_process, _minerd_addr) = start_minerd(tproxy_addr, None, None, false).await; + + sniffer + .wait_for_message_type( + MessageDirection::ToUpstream, + MESSAGE_TYPE_SUBMIT_SHARES_EXTENDED, + ) + .await; + + let tproxy_mon = tproxy_monitoring.expect("tproxy monitoring should be enabled"); + + // Poll until at least 1 extended upstream channel is visible + let channels = poll_until_api_field_gte( + tproxy_mon, + "/api/v1/server/channels", + "/total_extended", + 1.0, + POLL_TIMEOUT, + ) + .await; + + assert!( + channels["total_extended"].as_u64().unwrap() >= 1, + "tProxy should have at least 1 extended upstream channel" + ); + let ext_channels = channels["extended_channels"] + .as_array() + .expect("extended_channels should be an array"); + assert!(!ext_channels.is_empty(), "Should have channel details"); + // Each channel should have share data (server-side uses "shares_acknowledged") + assert!( + ext_channels[0]["shares_acknowledged"].as_u64().is_some(), + "Channel should have shares_acknowledged field, got: {}", + ext_channels[0] + ); + + shutdown_all!(tproxy, pool); +} + +// tProxy JSON API: `/api/v1/sv1/clients` SV1 miner list. 
+#[tokio::test] +async fn tproxy_api_sv1_clients_endpoint() { + start_tracing(); + let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); + let (pool, pool_addr, _pool_monitoring) = + start_pool(sv2_tp_config(tp_addr), vec![], vec![], false).await; + let (sniffer, sniffer_addr) = start_sniffer("0", pool_addr, false, vec![], None); + let (tproxy, tproxy_addr, tproxy_monitoring) = + start_sv2_translator(&[sniffer_addr], false, vec![], vec![], None, true).await; + let (_minerd_process, _minerd_addr) = start_minerd(tproxy_addr, None, None, false).await; + + sniffer + .wait_for_message_type( + MessageDirection::ToUpstream, + MESSAGE_TYPE_SUBMIT_SHARES_EXTENDED, + ) + .await; + + let tproxy_mon = tproxy_monitoring.expect("tproxy monitoring should be enabled"); + + // Poll until SV1 clients list is populated (typed) + let sv1_clients = poll_until_sv1_clients_total_gte(tproxy_mon, 1, POLL_TIMEOUT).await; + + assert!( + sv1_clients.total >= 1, + "tProxy should have at least 1 SV1 client" + ); + assert!(!sv1_clients.items.is_empty()); + assert!( + sv1_clients.items[0].client_id > 0, + "SV1 client should have client_id" + ); + + shutdown_all!(tproxy, pool); +} + +// tProxy JSON API: `/api/v1/sv1/clients/{id}` single SV1 client. 
+#[tokio::test] +async fn tproxy_api_sv1_client_by_id_endpoint() { + start_tracing(); + let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); + let (pool, pool_addr, _pool_monitoring) = + start_pool(sv2_tp_config(tp_addr), vec![], vec![], false).await; + let (sniffer, sniffer_addr) = start_sniffer("0", pool_addr, false, vec![], None); + let (tproxy, tproxy_addr, tproxy_monitoring) = + start_sv2_translator(&[sniffer_addr], false, vec![], vec![], None, true).await; + let (_minerd_process, _minerd_addr) = start_minerd(tproxy_addr, None, None, false).await; + + sniffer + .wait_for_message_type( + MessageDirection::ToUpstream, + MESSAGE_TYPE_SUBMIT_SHARES_EXTENDED, + ) + .await; + + let tproxy_mon = tproxy_monitoring.expect("tproxy monitoring should be enabled"); + + // Discover a client_id (typed) + let sv1_clients = poll_until_sv1_clients_total_gte(tproxy_mon, 1, POLL_TIMEOUT).await; + let sv1_client_id = sv1_clients.items[0].client_id; + + // Fetch single SV1 client by ID (typed) + let client: Sv1Client = fetch_api_typed( + tproxy_mon, + &format!("/api/v1/sv1/clients/{}", sv1_client_id), + ) + .await; + assert_eq!(client.client_id, sv1_client_id); + // Verify the field exists (minerd started without --userpass may leave it empty) + // authorized_worker_name is always present in the typed struct + + // Non-existent SV1 client should return 404 + let (status, _) = fetch_api_with_status(tproxy_mon, "/api/v1/sv1/clients/99999").await; + assert_eq!(status, 404, "Non-existent SV1 client should return 404"); + + shutdown_all!(tproxy, pool); +} + +// tProxy JSON API: `/api/v1/clients` returns 404 (tProxy has no SV2 downstreams). 
+#[tokio::test] +async fn tproxy_api_clients_returns_not_found() { + start_tracing(); + let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); + let (pool, pool_addr, _pool_monitoring) = + start_pool(sv2_tp_config(tp_addr), vec![], vec![], false).await; + let (tproxy, _tproxy_addr, tproxy_monitoring) = + start_sv2_translator(&[pool_addr], false, vec![], vec![], None, true).await; + + let tproxy_mon = tproxy_monitoring.expect("tproxy monitoring should be enabled"); + + // Inline assertion: endpoint returns HTTP 404 with JSON error body + let (status, json) = fetch_api_with_status(tproxy_mon, "/api/v1/clients").await; + assert_eq!(status, 404, "/api/v1/clients should return 404 for tProxy"); + assert!(json["error"].is_string(), "Should have error field"); + + shutdown_all!(tproxy, pool); +} + +// tProxy JSON API: root endpoint lists available APIs. +#[tokio::test] +async fn tproxy_api_root_endpoint() { + start_tracing(); + let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); + let (pool, pool_addr, _pool_monitoring) = + start_pool(sv2_tp_config(tp_addr), vec![], vec![], false).await; + let (tproxy, _tproxy_addr, tproxy_monitoring) = + start_sv2_translator(&[pool_addr], false, vec![], vec![], None, true).await; + + let tproxy_mon = tproxy_monitoring.expect("tproxy monitoring should be enabled"); + + // Inline assertion: root endpoint returns expected API listing structure + let json = fetch_api_json(tproxy_mon, "/").await; + assert_eq!( + json["service"], "SRI Monitoring API", + "Root endpoint should return service name, got: {}", + json + ); + assert!( + json["endpoints"].is_object(), + "Root endpoint should list endpoints, got: {}", + json + ); + + shutdown_all!(tproxy, pool); +} diff --git a/stratum-apps/src/monitoring/http_server.rs b/stratum-apps/src/monitoring/http_server.rs index 7daf6ad02..0fee21f00 100644 --- a/stratum-apps/src/monitoring/http_server.rs +++ b/stratum-apps/src/monitoring/http_server.rs @@ -929,17 
+929,116 @@ async fn handle_prometheus_metrics(State(state): State) -> Response #[cfg(test)] mod tests { use super::*; + use crate::monitoring::server::ServerInfo; use axum::body::Body; use http_body_util::BodyExt; use tower::ServiceExt; + // ── Route constants ───────────────────────────────────────────── + mod routes { + pub const ROOT: &str = "/"; + pub const HEALTH: &str = "/api/v1/health"; + pub const GLOBAL: &str = "/api/v1/global"; + pub const SERVER: &str = "/api/v1/server"; + pub const SERVER_CHANNELS: &str = "/api/v1/server/channels"; + pub const CLIENTS: &str = "/api/v1/clients"; + pub const SV1_CLIENTS: &str = "/api/v1/sv1/clients"; + pub const METRICS: &str = "/metrics"; + + pub fn client_by_id(id: u64) -> String { + format!("/api/v1/clients/{}", id) + } + + pub fn client_channels(id: u64) -> String { + format!("/api/v1/clients/{}/channels", id) + } + + pub fn sv1_client_by_id(id: u64) -> String { + format!("/api/v1/sv1/clients/{}", id) + } + } + + // ── Response types for JSON parsing ───────────────────────────── + #[derive(Debug, serde::Deserialize)] + struct ErrorResponseBody { + error: String, + } + + #[derive(Debug, serde::Deserialize)] + struct PaginatedResponse { + total: usize, + offset: usize, + limit: usize, + items: Vec, + } + + #[derive(Debug, serde::Deserialize)] + struct HealthResponseBody { + status: String, + } + + #[derive(Debug, serde::Deserialize)] + struct RootResponseBody { + service: String, + endpoints: serde_json::Value, + } + + #[derive(Debug, serde::Deserialize)] + struct GlobalResponseBody { + uptime_secs: u64, + server: Option, + sv2_clients: Option, + sv1_clients: Option, + } + + #[derive(Debug, serde::Deserialize)] + struct ServerSummaryBody { + extended_channels: u64, + } + + #[derive(Debug, serde::Deserialize)] + struct Sv2ClientsSummaryBody { + total_clients: u64, + } + + #[derive(Debug, serde::Deserialize)] + struct Sv1ClientsSummaryBody { + total_clients: u64, + } + + #[derive(Debug, serde::Deserialize)] + struct 
ServerResponseBody { + extended_channels_count: usize, + standard_channels_count: usize, + } + + #[derive(Debug, serde::Deserialize)] + struct ServerChannelsResponseBody { + offset: usize, + limit: usize, + total_extended: usize, + extended_channels: Vec, + } + + #[derive(Debug, serde::Deserialize)] + struct ClientMetadataBody { + client_id: u64, + } + + #[derive(Debug, serde::Deserialize)] + struct ClientChannelsResponseBody { + client_id: u64, + } + + #[derive(Debug, serde::Deserialize)] + struct Sv1ClientBody { + client_id: u64, + } + // ── helpers ────────────────────────────────────────────────────── - fn create_extended_channel_info( - channel_id: u32, - hashrate: f32, - ) -> super::super::client::ExtendedChannelInfo { - super::super::client::ExtendedChannelInfo { + fn create_extended_channel_info(channel_id: u32, hashrate: f32) -> ExtendedChannelInfo { + ExtendedChannelInfo { channel_id, user_identity: format!("user-ext-{}", channel_id), nominal_hashrate: hashrate, @@ -960,11 +1059,8 @@ mod tests { } } - fn create_standard_channel_info( - channel_id: u32, - hashrate: f32, - ) -> super::super::client::StandardChannelInfo { - super::super::client::StandardChannelInfo { + fn create_standard_channel_info(channel_id: u32, hashrate: f32) -> StandardChannelInfo { + StandardChannelInfo { channel_id, user_identity: format!("user-std-{}", channel_id), nominal_hashrate: hashrate, @@ -1039,22 +1135,22 @@ mod tests { } } - struct MockServer(super::super::server::ServerInfo); + struct MockServer(ServerInfo); impl ServerMonitoring for MockServer { - fn get_server(&self) -> super::super::server::ServerInfo { + fn get_server(&self) -> ServerInfo { self.0.clone() } } struct MockClients(Vec); - impl super::super::client::Sv2ClientsMonitoring for MockClients { + impl Sv2ClientsMonitoring for MockClients { fn get_sv2_clients(&self) -> Vec { self.0.clone() } } struct MockSv1Clients(Vec); - impl super::super::sv1::Sv1ClientsMonitoring for MockSv1Clients { + impl 
Sv1ClientsMonitoring for MockSv1Clients { fn get_sv1_clients(&self) -> Vec { self.0.clone() } @@ -1063,8 +1159,8 @@ mod tests { /// Build a full Router with mock data for integration testing. fn build_test_app( server: Option>, - clients: Option>, - sv1: Option>, + clients: Option>, + sv1: Option>, ) -> Router { let cache = Arc::new(SnapshotCache::new(Duration::from_secs(60), server, clients)); @@ -1210,43 +1306,43 @@ mod tests { #[tokio::test] async fn health_endpoint_returns_ok() { let app = build_test_app(None, None, None); - let response = app.oneshot(make_request("/api/v1/health")).await.unwrap(); + let response = app.oneshot(make_request(routes::HEALTH)).await.unwrap(); assert_eq!(response.status(), StatusCode::OK); let body = get_body(response).await; - let json: serde_json::Value = serde_json::from_str(&body).unwrap(); - assert_eq!(json["status"], "ok"); - assert!(json["timestamp"].as_u64().is_some()); + let resp: HealthResponseBody = serde_json::from_str(&body).unwrap(); + assert_eq!(resp.status, "ok"); } #[tokio::test] async fn root_endpoint_lists_endpoints() { let app = build_test_app(None, None, None); - let response = app.oneshot(make_request("/")).await.unwrap(); + let response = app.oneshot(make_request(routes::ROOT)).await.unwrap(); assert_eq!(response.status(), StatusCode::OK); let body = get_body(response).await; - let json: serde_json::Value = serde_json::from_str(&body).unwrap(); - assert_eq!(json["service"], "SRI Monitoring API"); - assert!(json["endpoints"].is_object()); + let resp: RootResponseBody = serde_json::from_str(&body).unwrap(); + assert_eq!(resp.service, "SRI Monitoring API"); + assert!(resp.endpoints.is_object()); } #[tokio::test] async fn global_endpoint_with_no_sources() { let app = build_test_app(None, None, None); - let response = app.oneshot(make_request("/api/v1/global")).await.unwrap(); + let response = app.oneshot(make_request(routes::GLOBAL)).await.unwrap(); assert_eq!(response.status(), StatusCode::OK); let body = 
get_body(response).await; - let json: serde_json::Value = serde_json::from_str(&body).unwrap(); - assert!(json["server"].is_null()); - assert!(json["sv2_clients"].is_null()); - assert!(json["uptime_secs"].as_u64().is_some()); + let resp: GlobalResponseBody = serde_json::from_str(&body).unwrap(); + assert!(resp.server.is_none()); + assert!(resp.sv2_clients.is_none()); + // uptime_secs can be 0 if test runs fast enough, just verify it parses + let _ = resp.uptime_secs; } #[tokio::test] async fn global_endpoint_with_data() { - let server = Arc::new(MockServer(super::super::server::ServerInfo { + let server = Arc::new(MockServer(ServerInfo { extended_channels: vec![create_server_extended_channel_info(1, Some(100.0))], standard_channels: vec![], })); @@ -1258,28 +1354,28 @@ mod tests { let app = build_test_app( Some(server as Arc), - Some(clients as Arc), + Some(clients as Arc), None, ); - let response = app.oneshot(make_request("/api/v1/global")).await.unwrap(); + let response = app.oneshot(make_request(routes::GLOBAL)).await.unwrap(); assert_eq!(response.status(), StatusCode::OK); let body = get_body(response).await; - let json: serde_json::Value = serde_json::from_str(&body).unwrap(); - assert_eq!(json["server"]["extended_channels"], 1); - assert_eq!(json["sv2_clients"]["total_clients"], 1); + let resp: GlobalResponseBody = serde_json::from_str(&body).unwrap(); + assert_eq!(resp.server.as_ref().unwrap().extended_channels, 1); + assert_eq!(resp.sv2_clients.as_ref().unwrap().total_clients, 1); } #[tokio::test] async fn server_endpoint_not_available() { let app = build_test_app(None, None, None); - let response = app.oneshot(make_request("/api/v1/server")).await.unwrap(); + let response = app.oneshot(make_request(routes::SERVER)).await.unwrap(); assert_eq!(response.status(), StatusCode::NOT_FOUND); } #[tokio::test] async fn server_endpoint_with_data() { - let server = Arc::new(MockServer(super::super::server::ServerInfo { + let server = Arc::new(MockServer(ServerInfo { 
extended_channels: vec![create_server_extended_channel_info(1, Some(100.0))], standard_channels: vec![create_server_standard_channel_info(2, Some(50.0))], })); @@ -1289,18 +1385,18 @@ mod tests { None, None, ); - let response = app.oneshot(make_request("/api/v1/server")).await.unwrap(); + let response = app.oneshot(make_request(routes::SERVER)).await.unwrap(); assert_eq!(response.status(), StatusCode::OK); let body = get_body(response).await; - let json: serde_json::Value = serde_json::from_str(&body).unwrap(); - assert_eq!(json["extended_channels_count"], 1); - assert_eq!(json["standard_channels_count"], 1); + let resp: ServerResponseBody = serde_json::from_str(&body).unwrap(); + assert_eq!(resp.extended_channels_count, 1); + assert_eq!(resp.standard_channels_count, 1); } #[tokio::test] async fn server_channels_endpoint_with_pagination() { - let server = Arc::new(MockServer(super::super::server::ServerInfo { + let server = Arc::new(MockServer(ServerInfo { extended_channels: vec![ create_server_extended_channel_info(1, Some(100.0)), create_server_extended_channel_info(2, Some(200.0)), @@ -1315,23 +1411,26 @@ mod tests { None, ); let response = app - .oneshot(make_request("/api/v1/server/channels?offset=1&limit=1")) + .oneshot(make_request(&format!( + "{}?offset=1&limit=1", + routes::SERVER_CHANNELS + ))) .await .unwrap(); assert_eq!(response.status(), StatusCode::OK); let body = get_body(response).await; - let json: serde_json::Value = serde_json::from_str(&body).unwrap(); - assert_eq!(json["total_extended"], 3); - assert_eq!(json["offset"], 1); - assert_eq!(json["limit"], 1); - assert_eq!(json["extended_channels"].as_array().unwrap().len(), 1); + let resp: ServerChannelsResponseBody = serde_json::from_str(&body).unwrap(); + assert_eq!(resp.total_extended, 3); + assert_eq!(resp.offset, 1); + assert_eq!(resp.limit, 1); + assert_eq!(resp.extended_channels.len(), 1); } #[tokio::test] async fn clients_endpoint_not_available() { let app = build_test_app(None, None, 
None); - let response = app.oneshot(make_request("/api/v1/clients")).await.unwrap(); + let response = app.oneshot(make_request(routes::CLIENTS)).await.unwrap(); assert_eq!(response.status(), StatusCode::NOT_FOUND); } @@ -1352,17 +1451,17 @@ mod tests { let app = build_test_app( None, - Some(clients as Arc), + Some(clients as Arc), None, ); - let response = app.oneshot(make_request("/api/v1/clients")).await.unwrap(); + let response = app.oneshot(make_request(routes::CLIENTS)).await.unwrap(); assert_eq!(response.status(), StatusCode::OK); let body = get_body(response).await; - let json: serde_json::Value = serde_json::from_str(&body).unwrap(); - assert_eq!(json["total"], 2); - assert_eq!(json["items"].as_array().unwrap().len(), 2); - assert_eq!(json["items"][0]["client_id"], 1); + let resp: PaginatedResponse = serde_json::from_str(&body).unwrap(); + assert_eq!(resp.total, 2); + assert_eq!(resp.items.len(), 2); + assert_eq!(resp.items[0].client_id, 1); } #[tokio::test] @@ -1375,20 +1474,18 @@ mod tests { let app = build_test_app( None, - Some(clients as Arc), + Some(clients as Arc), None, ); let response = app - .oneshot(make_request("/api/v1/clients/42")) + .oneshot(make_request(&routes::client_by_id(42))) .await .unwrap(); assert_eq!(response.status(), StatusCode::OK); let body = get_body(response).await; - let json: serde_json::Value = serde_json::from_str(&body).unwrap(); - assert_eq!(json["client_id"], 42); - assert_eq!(json["extended_channels_count"], 1); - assert_eq!(json["standard_channels_count"], 1); + let resp: ClientMetadataBody = serde_json::from_str(&body).unwrap(); + assert_eq!(resp.client_id, 42); } #[tokio::test] @@ -1401,11 +1498,11 @@ mod tests { let app = build_test_app( None, - Some(clients as Arc), + Some(clients as Arc), None, ); let response = app - .oneshot(make_request("/api/v1/clients/999")) + .oneshot(make_request(&routes::client_by_id(999))) .await .unwrap(); assert_eq!(response.status(), StatusCode::NOT_FOUND); @@ -1425,27 +1522,28 @@ mod 
tests { let app = build_test_app( None, - Some(clients as Arc), + Some(clients as Arc), None, ); let response = app - .oneshot(make_request("/api/v1/clients/1/channels?offset=1&limit=2")) + .oneshot(make_request(&format!( + "{}?offset=1&limit=2", + routes::client_channels(1) + ))) .await .unwrap(); assert_eq!(response.status(), StatusCode::OK); let body = get_body(response).await; - let json: serde_json::Value = serde_json::from_str(&body).unwrap(); - assert_eq!(json["client_id"], 1); - assert_eq!(json["total_extended"], 3); - assert_eq!(json["extended_channels"].as_array().unwrap().len(), 2); + let resp: ClientChannelsResponseBody = serde_json::from_str(&body).unwrap(); + assert_eq!(resp.client_id, 1); } #[tokio::test] async fn sv1_clients_not_available() { let app = build_test_app(None, None, None); let response = app - .oneshot(make_request("/api/v1/sv1/clients")) + .oneshot(make_request(routes::SV1_CLIENTS)) .await .unwrap(); assert_eq!(response.status(), StatusCode::NOT_FOUND); @@ -1461,18 +1559,18 @@ mod tests { let app = build_test_app( None, None, - Some(sv1 as Arc), + Some(sv1 as Arc), ); let response = app - .oneshot(make_request("/api/v1/sv1/clients")) + .oneshot(make_request(routes::SV1_CLIENTS)) .await .unwrap(); assert_eq!(response.status(), StatusCode::OK); let body = get_body(response).await; - let json: serde_json::Value = serde_json::from_str(&body).unwrap(); - assert_eq!(json["total"], 2); - assert_eq!(json["items"].as_array().unwrap().len(), 2); + let resp: PaginatedResponse = serde_json::from_str(&body).unwrap(); + assert_eq!(resp.total, 2); + assert_eq!(resp.items.len(), 2); } #[tokio::test] @@ -1482,17 +1580,17 @@ mod tests { let app = build_test_app( None, None, - Some(sv1 as Arc), + Some(sv1 as Arc), ); let response = app - .oneshot(make_request("/api/v1/sv1/clients/7")) + .oneshot(make_request(&routes::sv1_client_by_id(7))) .await .unwrap(); assert_eq!(response.status(), StatusCode::OK); let body = get_body(response).await; - let json: 
serde_json::Value = serde_json::from_str(&body).unwrap(); - assert_eq!(json["client_id"], 7); + let resp: Sv1ClientBody = serde_json::from_str(&body).unwrap(); + assert_eq!(resp.client_id, 7); } #[tokio::test] @@ -1502,10 +1600,10 @@ mod tests { let app = build_test_app( None, None, - Some(sv1 as Arc), + Some(sv1 as Arc), ); let response = app - .oneshot(make_request("/api/v1/sv1/clients/999")) + .oneshot(make_request(&routes::sv1_client_by_id(999))) .await .unwrap(); assert_eq!(response.status(), StatusCode::NOT_FOUND); @@ -1513,7 +1611,7 @@ mod tests { #[tokio::test] async fn metrics_endpoint_returns_prometheus_format() { - let server = Arc::new(MockServer(super::super::server::ServerInfo { + let server = Arc::new(MockServer(ServerInfo { extended_channels: vec![create_server_extended_channel_info(1, Some(100.0))], standard_channels: vec![], })); @@ -1525,10 +1623,10 @@ mod tests { let app = build_test_app( Some(server as Arc), - Some(clients as Arc), + Some(clients as Arc), None, ); - let response = app.oneshot(make_request("/metrics")).await.unwrap(); + let response = app.oneshot(make_request(routes::METRICS)).await.unwrap(); assert_eq!(response.status(), StatusCode::OK); let body = get_body(response).await; @@ -1540,7 +1638,7 @@ mod tests { #[tokio::test] async fn metrics_endpoint_with_no_sources() { let app = build_test_app(None, None, None); - let response = app.oneshot(make_request("/metrics")).await.unwrap(); + let response = app.oneshot(make_request(routes::METRICS)).await.unwrap(); assert_eq!(response.status(), StatusCode::OK); let body = get_body(response).await; @@ -1550,4 +1648,193 @@ mod tests { assert!(!body.contains("sv2_server_channels")); assert!(!body.contains("sv2_clients_total")); } + + // ── Edge-case unit tests (pagination, missing data, invalid params) ── + + #[test] + fn paginate_with_limit_zero() { + // effective_limit(Some(0)) = 0.min(MAX_LIMIT) = 0, so take(0) returns nothing + let items: Vec = (0..50).collect(); + let params = 
Pagination { + offset: 0, + limit: Some(0), + }; + let (total, result) = paginate(&items, ¶ms); + assert_eq!(total, 50); + assert!(result.is_empty(), "limit=0 should return no items"); + } + + #[tokio::test] + async fn server_channels_not_available() { + let app = build_test_app(None, None, None); + let response = app + .oneshot(make_request(routes::SERVER_CHANNELS)) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::NOT_FOUND); + + let body = get_body(response).await; + let resp: ErrorResponseBody = serde_json::from_str(&body).unwrap(); + assert!(!resp.error.is_empty()); + } + + #[tokio::test] + async fn client_by_id_no_monitoring() { + // When client monitoring is not available at all, any client_id returns 404 + let app = build_test_app(None, None, None); + let response = app + .oneshot(make_request(&routes::client_by_id(1))) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::NOT_FOUND); + + let body = get_body(response).await; + let resp: ErrorResponseBody = serde_json::from_str(&body).unwrap(); + assert!(!resp.error.is_empty()); + } + + #[tokio::test] + async fn client_channels_client_not_found() { + // Client monitoring is available but the specific client_id does not exist + let clients = Arc::new(MockClients(vec![Sv2ClientInfo { + client_id: 1, + extended_channels: vec![], + standard_channels: vec![], + }])); + + let app = build_test_app( + None, + Some(clients as Arc), + None, + ); + let response = app + .oneshot(make_request(&routes::client_channels(999))) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::NOT_FOUND); + + let body = get_body(response).await; + let resp: ErrorResponseBody = serde_json::from_str(&body).unwrap(); + assert!(resp.error.contains("999")); + } + + #[tokio::test] + async fn client_channels_no_monitoring() { + // When client monitoring is not available at all + let app = build_test_app(None, None, None); + let response = app + .oneshot(make_request(&routes::client_channels(1))) + .await 
+ .unwrap(); + assert_eq!(response.status(), StatusCode::NOT_FOUND); + } + + #[tokio::test] + async fn sv1_client_by_id_no_monitoring() { + // When SV1 monitoring is not available at all + let app = build_test_app(None, None, None); + let response = app + .oneshot(make_request(&routes::sv1_client_by_id(1))) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::NOT_FOUND); + + let body = get_body(response).await; + let resp: ErrorResponseBody = serde_json::from_str(&body).unwrap(); + assert!(!resp.error.is_empty()); + } + + #[tokio::test] + async fn clients_pagination_offset_and_limit() { + let clients = Arc::new(MockClients(vec![ + Sv2ClientInfo { + client_id: 1, + extended_channels: vec![create_extended_channel_info(1, 100.0)], + standard_channels: vec![], + }, + Sv2ClientInfo { + client_id: 2, + extended_channels: vec![], + standard_channels: vec![create_standard_channel_info(1, 50.0)], + }, + Sv2ClientInfo { + client_id: 3, + extended_channels: vec![create_extended_channel_info(2, 200.0)], + standard_channels: vec![], + }, + ])); + + let app = build_test_app( + None, + Some(clients as Arc), + None, + ); + let response = app + .oneshot(make_request(&format!( + "{}?offset=1&limit=1", + routes::CLIENTS + ))) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + let body = get_body(response).await; + let resp: PaginatedResponse = serde_json::from_str(&body).unwrap(); + assert_eq!(resp.total, 3); + assert_eq!(resp.offset, 1); + assert_eq!(resp.limit, 1); + assert_eq!(resp.items.len(), 1); + assert_eq!(resp.items[0].client_id, 2); + } + + #[tokio::test] + async fn sv1_clients_pagination() { + let sv1 = Arc::new(MockSv1Clients(vec![ + create_sv1_client_info(1, Some(100.0)), + create_sv1_client_info(2, Some(200.0)), + create_sv1_client_info(3, Some(300.0)), + ])); + + let app = build_test_app( + None, + None, + Some(sv1 as Arc), + ); + let response = app + .oneshot(make_request(&format!( + "{}?offset=2&limit=10", + routes::SV1_CLIENTS + 
))) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + let body = get_body(response).await; + let resp: PaginatedResponse = serde_json::from_str(&body).unwrap(); + assert_eq!(resp.total, 3); + assert_eq!(resp.offset, 2); + assert_eq!(resp.items.len(), 1); + assert_eq!(resp.items[0].client_id, 3); + } + + #[tokio::test] + async fn global_endpoint_with_sv1_data() { + let sv1 = Arc::new(MockSv1Clients(vec![create_sv1_client_info(1, Some(100.0))])); + + let app = build_test_app( + None, + None, + Some(sv1 as Arc), + ); + let response = app.oneshot(make_request(routes::GLOBAL)).await.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + let body = get_body(response).await; + let resp: GlobalResponseBody = serde_json::from_str(&body).unwrap(); + // Server and SV2 clients should be None + assert!(resp.server.is_none()); + assert!(resp.sv2_clients.is_none()); + // SV1 clients should be present + assert_eq!(resp.sv1_clients.as_ref().unwrap().total_clients, 1); + } }