diff --git a/server/src/handlers/project.rs b/server/src/handlers/project.rs index 60d5b29..581fdf8 100644 --- a/server/src/handlers/project.rs +++ b/server/src/handlers/project.rs @@ -7,7 +7,8 @@ use axum::{ Json, Router, }; use bollard::exec::{CreateExecOptions, StartExecResults}; -use bollard::image::{CreateImageOptions, RemoveImageOptions}; +use bollard::image::{CreateImageOptions, RemoveImageOptions}; +use bollard::container::ListContainersOptions; use tower_sessions::Session; use uuid::Uuid; use serde::Deserialize; @@ -335,6 +336,53 @@ pub async fn publish_handler( Ok(Json("Published!".to_string())) } +/// Helper function to find an existing viewer container for a project +/// This is used to reuse containers for old projects that don't have session_id labels +async fn find_existing_viewer_container(state: &AppState, project_slug: &str, owner_id: i64) -> Option<String> { + // First, check in-memory sessions for a match + { + let sessions = state.lock_sessions(); + for (session_id, ctx) in sessions.iter() { + if ctx.project_owner_id == Some(owner_id) + && ctx.project_slug.as_deref() == Some(project_slug) + && !ctx.container_name.is_empty() + && ctx.container_name != "INITIALIZING" { + return Some(session_id.clone()); + } + } + } + + // If not in memory, check Docker containers + // First try with project_slug label (new containers) + let filters_with_slug = HashMap::from([ + ("label".to_string(), vec![ + "managed_by=TryCli Studio".to_string(), + "container_type=viewer".to_string(), + format!("project_owner_id={}", owner_id), + format!("project_slug={}", project_slug), + ]) + ]); + + let opts = ListContainersOptions { + all: false, + filters: filters_with_slug, + ..Default::default() + }; + + if let Ok(containers) = state.docker.list_containers(Some(opts)).await { + if let Some(container) = containers.first() { + // Extract session_id from labels + if let Some(labels) = &container.labels { + if let Some(session_id) = labels.get("session_id") { + return 
Some(session_id.clone()); + } + } + } + + None +} + pub async fn get_project( Path((username, slug)): Path<(String, String)>, State(state): State<AppState>, @@ -477,23 +525,32 @@ pub async fn get_project( } // 7. Construct JSON response - // Generate the session ID here, but DO NOT spawn Docker yet. - let session_id = Uuid::new_v4().to_string(); - - { - let mut map = state.lock_sessions(); - map.insert(session_id.clone(), SessionContext { - container_name: String::new(), // Empty indicates "Not Started" - pending_image_tag: Some(image_tag), // Store tag for WS handler - shell, - owner_id: None, - project_owner_id: Some(owner_id), - is_publishing: false, - project_slug: Some(slug), - created_at: std::time::Instant::now(), - is_ws_connected: false, - }); - } + // Check if there's an existing viewer container for this project + let session_id = match find_existing_viewer_container(&state, &slug, owner_id).await { + Some(existing_session_id) => { + tracing::info!("Reusing existing session {} for project {}/{}", existing_session_id, username, slug); + existing_session_id + } + None => { + // Generate a new session ID and prepare for lazy container spawn + let new_session_id = Uuid::new_v4().to_string(); + + let mut map = state.lock_sessions(); + map.insert(new_session_id.clone(), SessionContext { + container_name: String::new(), // Empty indicates "Not Started" + pending_image_tag: Some(image_tag.clone()), // Store tag for WS handler + shell: shell.clone(), + owner_id: None, + project_owner_id: Some(owner_id), + is_publishing: false, + project_slug: Some(slug.clone()), + created_at: std::time::Instant::now(), + is_ws_connected: false, + }); + + new_session_id + } + }; let mut response_json = serde_json::json!({ "markdown": markdown, diff --git a/server/src/services/websocket.rs b/server/src/services/websocket.rs index 760077e..aac640f 100644 --- a/server/src/services/websocket.rs +++ b/server/src/services/websocket.rs @@ -47,6 +47,24 @@ fn create_container_labels( labels } 
+/// Calculate an approximate creation time based on container's created timestamp +/// This converts a Unix timestamp to an Instant for session tracking +fn calculate_session_created_at(created_ts: Option<i64>) -> std::time::Instant { + if let Some(created_ts) = created_ts { + let now = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs() as i64; + let age_secs = now - created_ts; + + std::time::Instant::now() + .checked_sub(std::time::Duration::from_secs(age_secs.max(0) as u64)) + .unwrap_or_else(|| std::time::Instant::now()) + } else { + std::time::Instant::now() + } +} + /// Restore sessions from existing Docker containers on server startup /// This allows pre-existing containers to be reconnected after server restart pub async fn restore_sessions_from_containers(state: &AppState) { @@ -72,47 +90,70 @@ pub async fn restore_sessions_from_containers(state: &AppState) { let project_owner_id = labels.get("project_owner_id").and_then(|s| s.parse::<i64>().ok()); let project_slug = labels.get("project_slug").map(|s| s.clone()); - if let (Some(session_id), Some(names)) = (session_id, container.names) { - let container_name = names.first() - .map(|n| n.trim_start_matches('/').to_string()) - .unwrap_or_default(); + let container_name = container.names + .as_ref() + .and_then(|names| names.first()) + .map(|n| n.trim_start_matches('/').to_string()) + .unwrap_or_default(); + + if container_name.is_empty() { + continue; + } + + // Handle new containers (with session_id label) + if let Some(session_id) = session_id { + let mut map = state.lock_sessions(); - if !container_name.is_empty() { - // Restore session to in-memory map + if !map.contains_key(&session_id) { + let created_at = calculate_session_created_at(container.created); + + map.insert(session_id.clone(), SessionContext { + container_name: container_name.clone(), + shell, + pending_image_tag: None, + owner_id, + project_owner_id, + is_publishing: false, + project_slug, + created_at, + 
is_ws_connected: false, + }); + restored += 1; + tracing::info!("Restored session {} with container {}", session_id, container_name); + } + } else { + // Handle legacy containers (without session_id label) + // Extract UUID from container name for use as session_id + let legacy_session_id = if container_name.starts_with("trycli-studio-viewer-") { + container_name.strip_prefix("trycli-studio-viewer-").map(|s| s.to_string()) + } else if container_name.starts_with("trycli-studio-session-") { + container_name.strip_prefix("trycli-studio-session-").map(|s| s.to_string()) + } else { + None + }; + + if let Some(legacy_session_id) = legacy_session_id { let mut map = state.lock_sessions(); - // Only restore if not already present (shouldn't happen, but be defensive) - if !map.contains_key(&session_id) { - // Calculate elapsed time from container creation - let created_at = if let Some(created_ts) = container.created { - // Container age in seconds - let now = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_secs() as i64; - let age_secs = now - created_ts; - - // Set created_at to approximate original time - std::time::Instant::now() - .checked_sub(std::time::Duration::from_secs(age_secs.max(0) as u64)) - .unwrap_or_else(|| std::time::Instant::now()) - } else { - std::time::Instant::now() - }; + if !map.contains_key(&legacy_session_id) { + let created_at = calculate_session_created_at(container.created); + + // For legacy containers, we don't know the exact metadata + // Set reasonable defaults based on container type - map.insert(session_id.clone(), SessionContext { + map.insert(legacy_session_id.clone(), SessionContext { container_name: container_name.clone(), shell, pending_image_tag: None, - owner_id, - project_owner_id, + owner_id: None, // Unknown for legacy containers + project_owner_id: None, // Unknown for legacy containers is_publishing: false, - project_slug, + project_slug: None, // Unknown for legacy containers created_at, 
- is_ws_connected: false, // Will be set to true on reconnection + is_ws_connected: false, }); restored += 1; - tracing::info!("Restored session {} with container {}", session_id, container_name); + tracing::info!("Restored legacy session {} from container {}", legacy_session_id, container_name); } } } @@ -171,7 +212,8 @@ pub async fn ws_handler( /// Attempt to restore a specific session from Docker containers /// This is called when a client tries to connect to a session that isn't in memory async fn restore_specific_session(state: &AppState, session_id: &str) { - let filters = HashMap::from([ + // First, try to find a container with the session_id label (new containers) + let filters_with_label = HashMap::from([ ("label".to_string(), vec![ "managed_by=TryCli Studio".to_string(), format!("session_id={}", session_id) @@ -180,7 +222,7 @@ async fn restore_specific_session(state: &AppState, session_id: &str) { let opts = ListContainersOptions { all: false, // Only running containers - filters, + filters: filters_with_label, ..Default::default() }; @@ -198,20 +240,7 @@ async fn restore_specific_session(state: &AppState, session_id: &str) { .unwrap_or_default(); if !container_name.is_empty() { - // Calculate elapsed time from container creation - let created_at = if let Some(created_ts) = container.created { - let now = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_secs() as i64; - let age_secs = now - created_ts; - - std::time::Instant::now() - .checked_sub(std::time::Duration::from_secs(age_secs.max(0) as u64)) - .unwrap_or_else(|| std::time::Instant::now()) - } else { - std::time::Instant::now() - }; + let created_at = calculate_session_created_at(container.created); let mut map = state.lock_sessions(); map.insert(session_id.to_string(), SessionContext { @@ -232,6 +261,59 @@ async fn restore_specific_session(state: &AppState, session_id: &str) { } } } + + // If not found, try to find a legacy container by matching container 
name + // Legacy containers have UUID in name: trycli-studio-viewer-{uuid} or trycli-studio-session-{uuid} + let legacy_container_names = vec![ + format!("trycli-studio-viewer-{}", session_id), + format!("trycli-studio-session-{}", session_id), + ]; + + for legacy_name in legacy_container_names { + // Try to find container by exact name match + let filters_by_name = HashMap::from([ + ("label".to_string(), vec!["managed_by=TryCli Studio".to_string()]), + ("name".to_string(), vec![legacy_name.clone()]) + ]); + + let opts = ListContainersOptions { + all: false, + filters: filters_by_name, + ..Default::default() + }; + + if let Ok(containers) = state.docker.list_containers(Some(opts)).await { + if let Some(container) = containers.first() { + let labels = container.labels.as_ref(); + let shell = labels + .and_then(|l| l.get("shell")) + .map(|s| s.clone()) + .unwrap_or_else(|| "/bin/bash".to_string()); + + let container_name = container.names.as_ref() + .and_then(|names| names.first()) + .map(|n| n.trim_start_matches('/').to_string()) + .unwrap_or(legacy_name.clone()); + + let created_at = calculate_session_created_at(container.created); + + let mut map = state.lock_sessions(); + map.insert(session_id.to_string(), SessionContext { + container_name: container_name.clone(), + shell, + pending_image_tag: None, + owner_id: None, // Unknown for legacy containers + project_owner_id: None, + is_publishing: false, + project_slug: None, + created_at, + is_ws_connected: false, + }); + tracing::info!("Restored legacy session {} from container {}", session_id, container_name); + return; + } + } + } } async fn handle_socket(mut socket: WebSocket, state: AppState, session_id: String, user_id: Option<i64>) {