diff --git a/Cargo.lock b/Cargo.lock index 67ca6b8..cf7ef91 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -428,6 +428,7 @@ version = "0.1.0" dependencies = [ "chrono", "dirs", + "glob", "hex", "serde", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index e42518d..bbce069 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,6 +78,7 @@ hex = "0.4" base64 = "0.22" semver = "1" shellexpand = "3" +glob = "0.3" futures-util = "0.3" serde_yaml = "0.9" diff --git a/coast-cli/src/commands/lookup.rs b/coast-cli/src/commands/lookup.rs index d6fc17c..7da702a 100644 --- a/coast-cli/src/commands/lookup.rs +++ b/coast-cli/src/commands/lookup.rs @@ -147,11 +147,28 @@ pub fn detect_worktree() -> Result> { let cwd = std::env::current_dir().context("Failed to get current directory")?; if let Ok((project_root, worktree_dirs)) = find_project_root_and_worktree_dirs(&cwd) { + use coast_core::coastfile::Coastfile; + for dir in &worktree_dirs { - let resolved = - coast_core::coastfile::Coastfile::resolve_worktree_dir(&project_root, dir); - if let Some(name) = detect_worktree_from_paths(&cwd, &resolved)? { - return Ok(Some(name)); + if Coastfile::is_glob_pattern(dir) { + let expanded = Coastfile::resolve_external_worktree_dirs_expanded( + &worktree_dirs, + &project_root, + ); + for ext_dir in &expanded { + if ext_dir.raw_pattern == *dir { + if let Some(name) = + detect_worktree_from_paths(&cwd, &ext_dir.resolved_path)? + { + return Ok(Some(name)); + } + } + } + } else { + let resolved = Coastfile::resolve_worktree_dir(&project_root, dir); + if let Some(name) = detect_worktree_from_paths(&cwd, &resolved)? 
{ + return Ok(Some(name)); + } } } } @@ -187,10 +204,13 @@ fn detect_worktree_via_git(cwd: &Path) -> Result> { let stdout = String::from_utf8_lossy(&output.stdout); let worktree_dirs = load_worktree_dirs_from_project(&real_project_root); - let external_dirs: Vec = worktree_dirs - .iter() - .filter(|d| coast_core::coastfile::Coastfile::is_external_worktree_dir(d)) - .map(|d| coast_core::coastfile::Coastfile::resolve_worktree_dir(&real_project_root, d)) + let external_dirs: Vec = + coast_core::coastfile::Coastfile::resolve_external_worktree_dirs_expanded( + &worktree_dirs, + &real_project_root, + ) + .into_iter() + .map(|d| d.resolved_path) .collect(); let mut current_path: Option = None; diff --git a/coast-core/Cargo.toml b/coast-core/Cargo.toml index 6b57b0a..1345abb 100644 --- a/coast-core/Cargo.toml +++ b/coast-core/Cargo.toml @@ -15,6 +15,7 @@ shellexpand = { workspace = true } sha2 = { workspace = true } hex = { workspace = true } dirs = { workspace = true } +glob = { workspace = true } ts-rs = { workspace = true } diff --git a/coast-core/src/coastfile/mod.rs b/coast-core/src/coastfile/mod.rs index a6bb3d4..3e15120 100644 --- a/coast-core/src/coastfile/mod.rs +++ b/coast-core/src/coastfile/mod.rs @@ -762,4 +762,75 @@ impl Coastfile { pub fn external_mount_path(index: usize) -> String { format!("{EXTERNAL_WORKTREE_MOUNT_PREFIX}/{index}") } + + /// Returns `true` if a worktree dir path contains glob metacharacters (`*`, `?`, `[`). + pub fn is_glob_pattern(dir: &str) -> bool { + dir.contains('*') || dir.contains('?') || dir.contains('[') + } + + /// Resolve all external worktree dirs, expanding glob patterns. + /// + /// Non-glob entries keep their original `worktree_dirs` index as the mount + /// index (backward compatible). For glob entries the first match reuses the + /// original index; additional matches are allocated sequentially starting + /// from `worktree_dirs.len()`. 
+ pub fn resolve_external_worktree_dirs_expanded( + worktree_dirs: &[String], + project_root: &Path, + ) -> Vec { + let mut results = Vec::new(); + let mut overflow_index = worktree_dirs.len(); + + for (idx, dir) in worktree_dirs.iter().enumerate() { + if !Self::is_external_worktree_dir(dir) { + continue; + } + let resolved = Self::resolve_worktree_dir(project_root, dir); + let resolved_str = resolved.to_string_lossy().to_string(); + + if Self::is_glob_pattern(&resolved_str) { + let mut matches: Vec = glob::glob(&resolved_str) + .into_iter() + .flatten() + .filter_map(std::result::Result::ok) + .filter(|p| p.is_dir()) + .collect(); + matches.sort(); + + for (i, matched_path) in matches.into_iter().enumerate() { + let mount_index = if i == 0 { + idx + } else { + let mi = overflow_index; + overflow_index += 1; + mi + }; + results.push(ResolvedExternalDir { + mount_index, + raw_pattern: dir.clone(), + resolved_path: matched_path, + }); + } + } else { + results.push(ResolvedExternalDir { + mount_index: idx, + raw_pattern: dir.clone(), + resolved_path: resolved, + }); + } + } + + results + } +} + +/// A resolved external worktree directory, possibly expanded from a glob pattern. +#[derive(Debug, Clone)] +pub struct ResolvedExternalDir { + /// Index used for the container mount path (`/host-external-wt/{mount_index}`). + pub mount_index: usize, + /// The original pattern string from the Coastfile (e.g. `~/.shep/repos/*/wt`). + pub raw_pattern: String, + /// The fully resolved absolute path on the host. 
+ pub resolved_path: PathBuf, } diff --git a/coast-core/src/coastfile/tests_parsing.rs b/coast-core/src/coastfile/tests_parsing.rs index 27a2d3f..64f242d 100644 --- a/coast-core/src/coastfile/tests_parsing.rs +++ b/coast-core/src/coastfile/tests_parsing.rs @@ -1694,6 +1694,104 @@ fn test_external_mount_path() { assert_eq!(Coastfile::external_mount_path(3), "/host-external-wt/3"); } +// --------------------------------------------------------------------------- +// Glob pattern tests +// --------------------------------------------------------------------------- + +#[test] +fn test_is_glob_pattern() { + assert!(Coastfile::is_glob_pattern("~/.shep/repos/*/wt")); + assert!(Coastfile::is_glob_pattern("/foo/ba?/baz")); + assert!(Coastfile::is_glob_pattern("/foo/[abc]/bar")); + assert!(!Coastfile::is_glob_pattern("~/.codex/worktrees")); + assert!(!Coastfile::is_glob_pattern(".worktrees")); + assert!(!Coastfile::is_glob_pattern("/absolute/path")); +} + +#[test] +fn test_resolve_external_worktree_dirs_expanded_no_globs() { + let dir = tempfile::tempdir().unwrap(); + let dirs = vec![".worktrees".to_string(), "~/.codex/worktrees".to_string()]; + let result = Coastfile::resolve_external_worktree_dirs_expanded(&dirs, dir.path()); + assert_eq!(result.len(), 1); + assert_eq!(result[0].mount_index, 1); + assert_eq!(result[0].raw_pattern, "~/.codex/worktrees"); +} + +#[test] +fn test_resolve_external_worktree_dirs_expanded_glob_with_matches() { + let dir = tempfile::tempdir().unwrap(); + let ext = dir.path().join("ext"); + std::fs::create_dir_all(ext.join("aaa").join("wt")).unwrap(); + std::fs::create_dir_all(ext.join("bbb").join("wt")).unwrap(); + std::fs::create_dir_all(ext.join("ccc")).unwrap(); // no "wt" subdir + + let pattern = format!("{}/*/wt", ext.display()); + let dirs = vec![".worktrees".to_string(), pattern.clone()]; + let result = Coastfile::resolve_external_worktree_dirs_expanded(&dirs, dir.path()); + + assert_eq!(result.len(), 2, "should match aaa/wt and bbb/wt"); + 
assert_eq!( + result[0].mount_index, 1, + "first match reuses original index" + ); + assert_eq!( + result[1].mount_index, 2, + "second match overflows to dirs.len()" + ); + assert!(result[0].resolved_path.ends_with("aaa/wt")); + assert!(result[1].resolved_path.ends_with("bbb/wt")); + assert_eq!(result[0].raw_pattern, pattern); +} + +#[test] +fn test_resolve_external_worktree_dirs_expanded_glob_no_matches() { + let dir = tempfile::tempdir().unwrap(); + let pattern = format!("{}/nonexistent/*/wt", dir.path().display()); + let dirs = vec![".worktrees".to_string(), pattern]; + let result = Coastfile::resolve_external_worktree_dirs_expanded(&dirs, dir.path()); + assert!(result.is_empty(), "no matches should produce empty result"); +} + +#[test] +fn test_resolve_external_worktree_dirs_expanded_preserves_non_glob_index() { + let dir = tempfile::tempdir().unwrap(); + let ext = dir.path().join("ext"); + std::fs::create_dir_all(ext.join("hash1").join("wt")).unwrap(); + + let glob_pattern = format!("{}/*/wt", ext.display()); + let dirs = vec![ + ".worktrees".to_string(), // index 0 (local) + "~/.codex/worktrees".to_string(), // index 1 (external, non-glob) + glob_pattern, // index 2 (external, glob) + "/some/literal/path".to_string(), // index 3 (external, non-glob) + ]; + let result = Coastfile::resolve_external_worktree_dirs_expanded(&dirs, dir.path()); + + assert_eq!(result.len(), 3); + assert_eq!(result[0].mount_index, 1, "codex keeps index 1"); + assert_eq!(result[1].mount_index, 2, "glob first match keeps index 2"); + assert_eq!(result[2].mount_index, 3, "literal keeps index 3"); +} + +#[test] +fn test_resolve_external_worktree_dirs_expanded_sorted_deterministic() { + let dir = tempfile::tempdir().unwrap(); + let ext = dir.path().join("repos"); + std::fs::create_dir_all(ext.join("zzz").join("wt")).unwrap(); + std::fs::create_dir_all(ext.join("aaa").join("wt")).unwrap(); + std::fs::create_dir_all(ext.join("mmm").join("wt")).unwrap(); + + let pattern = format!("{}/*/wt", 
ext.display()); + let dirs = vec![pattern]; + let result = Coastfile::resolve_external_worktree_dirs_expanded(&dirs, dir.path()); + + assert_eq!(result.len(), 3); + assert!(result[0].resolved_path.ends_with("aaa/wt"), "sorted first"); + assert!(result[1].resolved_path.ends_with("mmm/wt"), "sorted second"); + assert!(result[2].resolved_path.ends_with("zzz/wt"), "sorted third"); +} + #[test] fn test_worktree_dir_string_or_vec_compat() { let dir = tempfile::tempdir().unwrap(); diff --git a/coast-daemon/src/api/query/project_git.rs b/coast-daemon/src/api/query/project_git.rs index 79db0c3..4e98542 100644 --- a/coast-daemon/src/api/query/project_git.rs +++ b/coast-daemon/src/api/query/project_git.rs @@ -86,10 +86,9 @@ fn load_external_worktree_dirs(project: &str, project_root: &std::path::Path) -> use coast_core::coastfile::Coastfile; let worktree_dirs = load_worktree_dirs_from_live_or_cached(project, project_root); - worktree_dirs - .iter() - .filter(|d| Coastfile::is_external_worktree_dir(d)) - .map(|d| Coastfile::resolve_worktree_dir(project_root, d)) + Coastfile::resolve_external_worktree_dirs_expanded(&worktree_dirs, project_root) + .into_iter() + .map(|d| d.resolved_path) .collect() } diff --git a/coast-daemon/src/git_watcher.rs b/coast-daemon/src/git_watcher.rs index 4d0f71b..5fe120e 100644 --- a/coast-daemon/src/git_watcher.rs +++ b/coast-daemon/src/git_watcher.rs @@ -87,31 +87,36 @@ async fn list_worktree_dirs(project_root: &Path, wt_dir_names: &[String]) -> Opt let mut found_any = false; let git_dir = project_root.join(".git"); + let expanded_external = + Coastfile::resolve_external_worktree_dirs_expanded(wt_dir_names, project_root); + for wt_dir_name in wt_dir_names { if Coastfile::is_external_worktree_dir(wt_dir_name) { - let resolved = Coastfile::resolve_worktree_dir(project_root, wt_dir_name); - let found = scan_external_worktree_dir(&resolved, &git_dir).await; - if !found.is_empty() { - found_any = true; - names.extend(found); - } - } else { - let wt_path 
= project_root.join(wt_dir_name); - let Ok(mut entries) = tokio::fs::read_dir(&wt_path).await else { - continue; - }; - found_any = true; - while let Ok(Some(entry)) = entries.next_entry().await { - if let Ok(ft) = entry.file_type().await { - if ft.is_dir() { - if let Some(name) = entry.file_name().to_str() { - names.push(name.to_string()); - } + continue; // handled via expanded_external below + } + let wt_path = project_root.join(wt_dir_name); + let Ok(mut entries) = tokio::fs::read_dir(&wt_path).await else { + continue; + }; + found_any = true; + while let Ok(Some(entry)) = entries.next_entry().await { + if let Ok(ft) = entry.file_type().await { + if ft.is_dir() { + if let Some(name) = entry.file_name().to_str() { + names.push(name.to_string()); } } } } } + + for ext_dir in &expanded_external { + let found = scan_external_worktree_dir(&ext_dir.resolved_path, &git_dir).await; + if !found.is_empty() { + found_any = true; + names.extend(found); + } + } if !found_any { return None; } diff --git a/coast-daemon/src/handlers/assign/services.rs b/coast-daemon/src/handlers/assign/services.rs index be3b183..df13eb8 100644 --- a/coast-daemon/src/handlers/assign/services.rs +++ b/coast-daemon/src/handlers/assign/services.rs @@ -328,18 +328,8 @@ async fn find_worktree_in_external_dirs( ) -> Option { use coast_core::coastfile::Coastfile; - let external_dirs: Vec<(usize, String, std::path::PathBuf)> = worktree_dirs - .iter() - .enumerate() - .filter(|(_, d)| Coastfile::is_external_worktree_dir(d)) - .map(|(idx, d)| { - ( - idx, - d.clone(), - Coastfile::resolve_worktree_dir(project_root, d), - ) - }) - .collect(); + let external_dirs = + Coastfile::resolve_external_worktree_dirs_expanded(worktree_dirs, project_root); if external_dirs.is_empty() { return None; @@ -462,7 +452,7 @@ enum MatchMode { fn match_porcelain_to_external( porcelain: &str, worktree_name: &str, - external_dirs: &[(usize, String, std::path::PathBuf)], + external_dirs: 
&[coast_core::coastfile::ResolvedExternalDir], ) -> Option { let entries = parse_porcelain_entries(porcelain); @@ -499,7 +489,7 @@ fn try_match_external_worktree( line: &str, wt_path: &std::path::Path, worktree_name: &str, - external_dirs: &[(usize, String, std::path::PathBuf)], + external_dirs: &[coast_core::coastfile::ResolvedExternalDir], mode: MatchMode, ) -> Option { use coast_core::coastfile::Coastfile; @@ -514,8 +504,11 @@ fn try_match_external_worktree( wt_path.file_name().and_then(|n| n.to_str()).unwrap_or("") }; - for (idx, raw_dir, resolved) in external_dirs { - let canon_ext = resolved.canonicalize().unwrap_or_else(|_| resolved.clone()); + for ext_dir in external_dirs { + let canon_ext = ext_dir + .resolved_path + .canonicalize() + .unwrap_or_else(|_| ext_dir.resolved_path.clone()); if !wt_canonical.starts_with(&canon_ext) { continue; } @@ -528,10 +521,10 @@ fn try_match_external_worktree( MatchMode::BranchOnly => branch_name == worktree_name, }; if matches { - let ext_mount = Coastfile::external_mount_path(*idx); + let ext_mount = Coastfile::external_mount_path(ext_dir.mount_index); let mount_src = format!("{ext_mount}/{relative_str}"); return Some(WorktreeLocation { - wt_dir: raw_dir.clone(), + wt_dir: ext_dir.raw_pattern.clone(), host_path: wt_canonical, container_mount_src: mount_src, }); @@ -2088,7 +2081,11 @@ mod tests { wt_b.display(), ); - let external_dirs = vec![(0_usize, "~/ext".to_string(), ext_path)]; + let external_dirs = vec![coast_core::coastfile::ResolvedExternalDir { + mount_index: 0, + raw_pattern: "~/ext".to_string(), + resolved_path: ext_path, + }]; let loc = match_porcelain_to_external(&porcelain, "foo", &external_dirs); assert!(loc.is_some(), "should find a match"); @@ -2111,7 +2108,11 @@ mod tests { let porcelain = format!("worktree {}\nbranch refs/heads/my-branch\n\n", wt.display(),); - let external_dirs = vec![(0_usize, "~/ext".to_string(), ext_path)]; + let external_dirs = vec![coast_core::coastfile::ResolvedExternalDir { + 
mount_index: 0, + raw_pattern: "~/ext".to_string(), + resolved_path: ext_path, + }]; // No directory match for "my-branch", but branch matches. let loc = match_porcelain_to_external(&porcelain, "my-branch", &external_dirs); diff --git a/coast-daemon/src/handlers/run/provision.rs b/coast-daemon/src/handlers/run/provision.rs index fb0e8d0..c53c756 100644 --- a/coast-daemon/src/handlers/run/provision.rs +++ b/coast-daemon/src/handlers/run/provision.rs @@ -27,7 +27,7 @@ pub(super) struct ProvisionResult { } type PreAllocatedPort = (String, u16, u16); -type ExternalWorktreeDir = (usize, std::path::PathBuf); +type ExternalWorktreeDir = coast_core::coastfile::ResolvedExternalDir; type DindContainerManager = coast_docker::container::ContainerManager; @@ -510,7 +510,10 @@ async fn create_container( let external_worktree_dirs = if let Ok(cf) = coast_core::coastfile::Coastfile::from_file(&artifact_dir_path.join("coastfile.toml")) { - cf.external_worktree_dirs() + coast_core::coastfile::Coastfile::resolve_external_worktree_dirs_expanded( + &cf.worktree_dirs, + &cf.project_root, + ) } else { Vec::new() }; @@ -591,10 +594,12 @@ fn build_container_config( }); append_shared_caddy_pki_bind_mount(&mut config, ctx.shared_caddy_pki_host_dir); - for (idx, resolved) in ctx.external_worktree_dirs { + for ext_dir in ctx.external_worktree_dirs { config.bind_mounts.push(coast_docker::runtime::BindMount { - host_path: resolved.clone(), - container_path: coast_core::coastfile::Coastfile::external_mount_path(*idx), + host_path: ext_dir.resolved_path.clone(), + container_path: coast_core::coastfile::Coastfile::external_mount_path( + ext_dir.mount_index, + ), read_only: false, propagation: None, }); diff --git a/coast-daemon/src/handlers/start.rs b/coast-daemon/src/handlers/start.rs index 8e9c3e8..b34f4c1 100644 --- a/coast-daemon/src/handlers/start.rs +++ b/coast-daemon/src/handlers/start.rs @@ -455,14 +455,14 @@ fn compute_start_mount_src( } } - // Phase 2: External worktree dirs (directory + 
branch match). + // Phase 2: External worktree dirs (directory + branch match, with glob expansion). if let Some(ref root) = project_root { - for (idx, dir) in worktree_dirs.iter().enumerate() { - if Coastfile::is_external_worktree_dir(dir) { - let resolved = Coastfile::resolve_worktree_dir(root, dir); - if let Some(mount) = find_external_wt_mount_src(root, &resolved, idx, wt) { - return mount; - } + let expanded = Coastfile::resolve_external_worktree_dirs_expanded(&worktree_dirs, root); + for ext_dir in &expanded { + if let Some(mount) = + find_external_wt_mount_src(root, &ext_dir.resolved_path, ext_dir.mount_index, wt) + { + return mount; } } } diff --git a/coast-guard/src/components/DocsSidebar.tsx b/coast-guard/src/components/DocsSidebar.tsx index 9ee6f82..f3cec13 100644 --- a/coast-guard/src/components/DocsSidebar.tsx +++ b/coast-guard/src/components/DocsSidebar.tsx @@ -77,6 +77,7 @@ const DOC_TITLE_KEYS: Record = { 'harnesses/CURSOR.md': 'docs.nav.harnessCursor', 'harnesses/CONDUCTOR.md': 'docs.nav.harnessConductor', 'harnesses/T3_CODE.md': 'docs.nav.harnessT3Code', + 'harnesses/SHEP.md': 'docs.nav.harnessShep', 'harnesses/MULTIPLE_HARNESSES.md': 'docs.nav.harnessMultipleHarnesses', 'recipes/README.md': 'docs.nav.recipes', 'recipes/FULLSTACK_MONOREPO.md': 'docs.nav.recipesFullstackMonorepo', diff --git a/coast-guard/src/generated/doc-order.json b/coast-guard/src/generated/doc-order.json index b768e19..b1c5ec8 100644 --- a/coast-guard/src/generated/doc-order.json +++ b/coast-guard/src/generated/doc-order.json @@ -17,46 +17,47 @@ "harnesses/CLAUDE_CODE.md": 160, "harnesses/CURSOR.md": 170, "harnesses/T3_CODE.md": 180, - "harnesses/MULTIPLE_HARNESSES.md": 190, - "concepts_and_terminology/README.md": 200, - "concepts_and_terminology/COASTS.md": 210, - "concepts_and_terminology/RUN.md": 220, - "concepts_and_terminology/REMOVE.md": 230, - "concepts_and_terminology/FILESYSTEM.md": 240, - "concepts_and_terminology/DAEMON.md": 250, - "concepts_and_terminology/CLI.md": 
260, - "concepts_and_terminology/COASTGUARD.md": 270, - "concepts_and_terminology/PORTS.md": 280, - "concepts_and_terminology/PRIMARY_PORT_AND_DNS.md": 290, - "concepts_and_terminology/ASSIGN.md": 300, - "concepts_and_terminology/CHECKOUT.md": 310, - "concepts_and_terminology/LOOKUP.md": 320, - "concepts_and_terminology/VOLUMES.md": 330, - "concepts_and_terminology/SHARED_SERVICES.md": 340, - "concepts_and_terminology/SECRETS.md": 350, - "concepts_and_terminology/BUILDS.md": 360, - "concepts_and_terminology/COASTFILE_TYPES.md": 370, - "concepts_and_terminology/RUNTIMES_AND_SERVICES.md": 380, - "concepts_and_terminology/BARE_SERVICES.md": 390, - "concepts_and_terminology/MIXED_SERVICE_TYPES.md": 400, - "concepts_and_terminology/LOGS.md": 410, - "concepts_and_terminology/EXEC_AND_DOCKER.md": 420, - "concepts_and_terminology/AGENT_SHELLS.md": 430, - "concepts_and_terminology/MCP_SERVERS.md": 440, - "concepts_and_terminology/PERFORMANCE_OPTIMIZATIONS.md": 450, - "concepts_and_terminology/TROUBLESHOOTING.md": 460, - "coastfiles/README.md": 470, - "coastfiles/PROJECT.md": 480, - "coastfiles/WORKTREE_DIR.md": 490, - "coastfiles/PORTS.md": 500, - "coastfiles/SHARED_SERVICES.md": 510, - "coastfiles/SERVICES.md": 520, - "coastfiles/SECRETS.md": 530, - "coastfiles/VOLUMES.md": 540, - "coastfiles/ASSIGN.md": 550, - "coastfiles/INHERITANCE.md": 560, - "coastfiles/AGENT_SHELL.md": 570, - "coastfiles/MCP.md": 580, - "recipes/README.md": 590, - "recipes/FULLSTACK_MONOREPO.md": 600 + "harnesses/SHEP.md": 190, + "harnesses/MULTIPLE_HARNESSES.md": 200, + "concepts_and_terminology/README.md": 210, + "concepts_and_terminology/COASTS.md": 220, + "concepts_and_terminology/RUN.md": 230, + "concepts_and_terminology/REMOVE.md": 240, + "concepts_and_terminology/FILESYSTEM.md": 250, + "concepts_and_terminology/DAEMON.md": 260, + "concepts_and_terminology/CLI.md": 270, + "concepts_and_terminology/COASTGUARD.md": 280, + "concepts_and_terminology/PORTS.md": 290, + 
"concepts_and_terminology/PRIMARY_PORT_AND_DNS.md": 300, + "concepts_and_terminology/ASSIGN.md": 310, + "concepts_and_terminology/CHECKOUT.md": 320, + "concepts_and_terminology/LOOKUP.md": 330, + "concepts_and_terminology/VOLUMES.md": 340, + "concepts_and_terminology/SHARED_SERVICES.md": 350, + "concepts_and_terminology/SECRETS.md": 360, + "concepts_and_terminology/BUILDS.md": 370, + "concepts_and_terminology/COASTFILE_TYPES.md": 380, + "concepts_and_terminology/RUNTIMES_AND_SERVICES.md": 390, + "concepts_and_terminology/BARE_SERVICES.md": 400, + "concepts_and_terminology/MIXED_SERVICE_TYPES.md": 410, + "concepts_and_terminology/LOGS.md": 420, + "concepts_and_terminology/EXEC_AND_DOCKER.md": 430, + "concepts_and_terminology/AGENT_SHELLS.md": 440, + "concepts_and_terminology/MCP_SERVERS.md": 450, + "concepts_and_terminology/PERFORMANCE_OPTIMIZATIONS.md": 460, + "concepts_and_terminology/TROUBLESHOOTING.md": 470, + "coastfiles/README.md": 480, + "coastfiles/PROJECT.md": 490, + "coastfiles/WORKTREE_DIR.md": 500, + "coastfiles/PORTS.md": 510, + "coastfiles/SHARED_SERVICES.md": 520, + "coastfiles/SERVICES.md": 530, + "coastfiles/SECRETS.md": 540, + "coastfiles/VOLUMES.md": 550, + "coastfiles/ASSIGN.md": 560, + "coastfiles/INHERITANCE.md": 570, + "coastfiles/AGENT_SHELL.md": 580, + "coastfiles/MCP.md": 590, + "recipes/README.md": 600, + "recipes/FULLSTACK_MONOREPO.md": 610 } diff --git a/coast-guard/src/generated/docs-manifest.json b/coast-guard/src/generated/docs-manifest.json index 01cc3f1..59e847f 100644 --- a/coast-guard/src/generated/docs-manifest.json +++ b/coast-guard/src/generated/docs-manifest.json @@ -261,6 +261,11 @@ "path": "harnesses/MULTIPLE_HARNESSES.md", "type": "file" }, + { + "name": "SHEP.md", + "path": "harnesses/SHEP.md", + "type": "file" + }, { "name": "T3_CODE.md", "path": "harnesses/T3_CODE.md", @@ -341,10 +346,10 @@ "files": { "README.md": "# Coasts Documentation\n\n```youtube\nMBGKSKau4sU\nPart of the [Coasts Video 
Course](learn-coasts-videos/README.md).\n```\n\n## Installing\n\n- `curl -fsSL https://coasts.dev/install | sh`\n- `coast daemon install`\n\n*If you decide not to run `coast daemon install`, you are responsible for starting the daemon manually with `coast daemon start` every single time.*\n\n## What Are Coasts?\n\nA Coast (**containerized host**) is a local development runtime. Coasts let you run multiple isolated environments for the same project on one machine.\n\nCoasts are especially useful for complex `docker-compose` stacks with many interdependent services, but they are equally effective for non-containerized local dev setups. Coasts support a wide range of [runtime configuration patterns](concepts_and_terminology/RUNTIMES_AND_SERVICES.md) so you can shape the ideal environment for multiple agents working in parallel.\n\nCoasts are built for local development, not as a hosted cloud service. Your environments run locally on your machine.\n\nThe Coasts project is free, local, MIT-licensed, agent-provider agnostic, and agent-harness agnostic software with no AI upsells.\n\nCoasts work with any agentic coding workflow that uses worktrees. No special harness-side configuration is required.\n\n## Why Coasts for Worktrees\n\nGit worktrees are excellent for isolating code changes, but they do not solve runtime isolation by themselves.\n\nWhen you run multiple worktrees in parallel, you quickly hit ergonomic problems:\n\n- [Port conflicts](concepts_and_terminology/PORTS.md) between services that expect the same host ports.\n- Per-worktree database and [volume setup](concepts_and_terminology/VOLUMES.md) that is tedious to manage.\n- Integration test environments that need custom runtime wiring per worktree.\n- The living hell of switching worktrees and rebuilding runtime context each time. 
See [Assign and Unassign](concepts_and_terminology/ASSIGN.md).\n\nIf Git is version control for your code, Coasts are like Git for your worktree runtimes.\n\nEach environment gets its own ports, so you can inspect any worktree runtime in parallel. When you [check out](concepts_and_terminology/CHECKOUT.md) a worktree runtime, Coasts remap that runtime to your project's canonical ports.\n\nCoasts abstract runtime configuration into a simple modular layer on top of worktrees, so each worktree can run with the isolation it needs without hand-maintaining complex per-worktree setup.\n\n## Requirements\n\n- macOS or Linux\n- Docker Desktop on macOS, or Docker Engine with the Compose plugin on Linux\n- A project using Git\n- Node.js\n- `socat` (`brew install socat` on macOS, `sudo apt install socat` on Ubuntu)\n\n```text\nLinux note: Dynamic ports work out of the box on Linux.\nIf you need canonical ports below `1024`, see the checkout docs for the required host configuration.\n```\n\n## Containerizing Agents?\n\nYou can containerize an agent with a Coast. That might sound like a great idea at first, but in many cases you do not actually need to run your coding agent inside a container.\n\nBecause Coasts share the [filesystem](concepts_and_terminology/FILESYSTEM.md) with your host machine through a shared volume mount, the easiest and most reliable workflow is to run the agent on your host and instruct it to execute runtime-heavy tasks (such as integration tests) inside the Coast instance using [`coast exec`](concepts_and_terminology/EXEC_AND_DOCKER.md).\n\nHowever, if you do want to run your agent in a container, Coasts absolutely support that via [Agent Shells](concepts_and_terminology/AGENT_SHELLS.md). You can build an incredibly intricate rig for this setup including [MCP server configuration](concepts_and_terminology/MCP_SERVERS.md), but it may not interoperate cleanly with the orchestration software that exists today. 
For most workflows, host-side agents are simpler and more reliable.\n\n## Coasts vs Dev Containers\n\nCoasts are not dev containers, and they are not the same thing.\n\nDev containers are generally designed for mounting an IDE into a single containerized development workspace. Coasts are headless and optimized as lightweight environments for parallel agent usage with worktrees — multiple isolated, worktree-aware runtime environments running side by side, with fast checkout switching and runtime isolation controls for each instance.\n\n## Demo Repo\n\nIf you want a small example project to try with Coasts, start with the [`coasts-demo` repository](https://github.com/coast-guard/coasts-demo).\n\n## Coasts Video Course\n\nIf you prefer video, the [Coasts Video Course](learn-coasts-videos/README.md) covers every core concept in under three minutes each.\n\n", "GETTING_STARTED.md": "# Getting Started with Coasts\n\n```youtube\nJe921fgJ4RY\nPart of the [Coasts Video Course](learn-coasts-videos/README.md).\n```\n\n## Installing\n\n```bash\ncurl -fsSL https://coasts.dev/install | sh\ncoast daemon install\n```\n\n*If you decide not to run `coast daemon install`, you are responsible for starting the daemon manually with `coast daemon start` every single time.*\n\n## Requirements\n\n- macOS or Linux\n- Docker Desktop on macOS, or Docker Engine with the Compose plugin on Linux\n- A project using Git\n- Node.js\n- `socat` (`brew install socat` on macOS, `sudo apt install socat` on Ubuntu)\n\n```text\nLinux note: Dynamic ports work out of the box on Linux.\nIf you need canonical ports below `1024`, see the checkout docs for the required host configuration.\n```\n\n## Setting Up Coasts in a Project\n\nAdd a Coastfile to the root of your project. 
Make sure you are not on a worktree when installing.\n\n```text\nmy-project/\n├── Coastfile <-- this is what Coast reads\n├── docker-compose.yml\n├── Dockerfile\n├── src/\n│ └── ...\n└── ...\n```\n\nThe `Coastfile` points at your existing local development resources and adds Coasts-specific configuration — see the [Coastfiles documentation](coastfiles/README.md) for the full schema:\n\n```toml\n[coast]\nname = \"my-project\"\ncompose = \"./docker-compose.yml\"\n\n[ports]\nweb = 3000\ndb = 5432\n```\n\nA Coastfile is a lightweight TOML file that *typically* points to your existing `docker-compose.yml` (it also works with non-containerized local dev setups) and describes the modifications needed to run your project in parallel — port mappings, volume strategies, and secrets. Place it at your project root.\n\nThe fastest way to create a Coastfile for your project is to let your coding agent do it.\n\nThe Coasts CLI ships with a built-in prompt that teaches any AI agent the full Coastfile schema and CLI. Copy it into your agent's chat and it will analyze your project and generate a Coastfile.\n\n```prompt-copy\ninstallation_prompt.txt\n```\n\nYou can also get the same output from the CLI by running `coast installation-prompt`.\n\n## Your First Coast\n\nBefore starting your first Coast, bring down any running development environment. If you are using Docker Compose, run `docker-compose down`. If you have local dev servers running, stop them. 
Coasts manage their own ports and will conflict with anything already listening.\n\nOnce your Coastfile is ready:\n\n```bash\ncoast build\ncoast run dev-1\n```\n\nCheck that your instance is running:\n\n```bash\ncoast ls\n\n# NAME PROJECT STATUS BRANCH RUNTIME WORKTREE CO ROOT\n# dev-1 my-project running main dind - ~/dev/my-project\n```\n\nSee where your services are listening:\n\n```bash\ncoast ports dev-1\n\n# SERVICE CANONICAL DYNAMIC\n# ★ web 3000 62217\n# db 5432 55681\n```\n\nEach instance gets its own set of dynamic ports so multiple instances can run side by side. To map an instance back to your project's canonical ports, check it out:\n\n```bash\ncoast checkout dev-1\n```\n\nThis means the runtime is now checked out and your project's canonical ports (like `3000`, `5432`) will route to this Coast instance.\n\n```bash\ncoast ls\n\n# NAME PROJECT STATUS BRANCH RUNTIME WORKTREE CO ROOT\n# dev-1 my-project running main dind - ✓ ~/dev/my-project\n```\n\nTo bring up the Coastguard observability UI for your project:\n\n```bash\ncoast ui\n```\n\n## What's Next?\n\n- Set up a [skill for your host agent](SKILLS_FOR_HOST_AGENTS.md) so it knows how to interact with Coasts\n", - "SKILLS_FOR_HOST_AGENTS.md": "# Skills for Host Agents\n\nIf you use AI coding agents on the host while your app runs inside Coasts, your\nagent usually needs two Coast-specific pieces of setup:\n\n1. an always-on Coast Runtime section in the harness's project instruction file\n or rule file\n2. 
a reusable Coast workflow skill such as `/coasts` when the harness supports\n project skills\n\nWithout the first piece, the agent edits files but forgets to use `coast exec`.\nWithout the second, every Coast assignment, log, and UI flow has to be\nre-explained in chat.\n\nThis guide keeps the setup concrete and Coast-specific: which file to create,\nwhat text goes in it, and how that changes by harness.\n\n## Why agents need this\n\nCoasts share the [filesystem](concepts_and_terminology/FILESYSTEM.md) between\nyour host machine and the Coast container. Your agent edits files on the host\nand the running services inside the Coast see the changes immediately. But the\nagent still needs to:\n\n1. discover which Coast instance matches the current checkout\n2. run tests, builds, and runtime commands inside that Coast\n3. read logs and service status from the Coast\n4. handle worktree assignment safely when no Coast is already attached\n\n## What goes where\n\n- `AGENTS.md`, `CLAUDE.md`, or `.cursor/rules/coast.md` — short Coast rules\n that should apply on every task, even if no skill is invoked\n- skill (`.agents/skills/...`, `.claude/skills/...`, or `.cursor/skills/...`)\n — the reusable Coast workflow itself, such as `/coasts`\n- command file (`.claude/commands/...` or `.cursor/commands/...`) — optional\n explicit entrypoint for harnesses that support it; one simple option is to\n have the command reuse the skill\n\nIf one repo uses more than one harness, keep the canonical Coast skill in one\nplace and expose it where needed. See\n[Multiple Harnesses](harnesses/MULTIPLE_HARNESSES.md).\n\n## 1. Always-on Coast Runtime rules\n\nAdd the following block to the harness's always-on project instruction file or\nrule file (`AGENTS.md`, `CLAUDE.md`, `.cursor/rules/coast.md`, or equivalent):\n\n```text-copy\n# Coast Runtime\n\nThis project uses Coasts — containerized runtimes for running services, tests,\nand other runtime commands. 
The filesystem is shared between the host and the\ncontainer, so file edits on either side are visible to both immediately.\n\n## Discovery\n\nBefore the first runtime command in a session, run:\n\n coast lookup\n\nThis prints the instance name, ports, and example commands. Use the instance\nname from the output for all subsequent commands.\n\n## What runs where\n\nThe filesystem is shared, so only use `coast exec` for things that need the\ncontainer runtime (databases, services, integration tests). Everything else\nruns directly on the host.\n\nUse `coast exec` for:\n- Tests that need running services (integration tests, API tests)\n- Service restarts or compose operations\n- Anything that talks to databases, caches, or other container services\n\nRun directly on the host:\n- Linting, typechecking, formatting\n- Git operations\n- Playwright and browser tests\n- Installing host-side dependencies (npm install, pip install)\n- File search, code generation, static analysis\n\nExample:\n\n coast exec -- sh -c \"cd && npm test\" # needs DB\n npm run lint # host is fine\n npx playwright test # host is fine\n\n## Runtime feedback\n\n coast ps \n coast logs --service \n coast logs --service --tail 50\n\n## Creating and assigning Coasts\n\nIf `coast lookup` returns no match, run `coast ls` to see what exists.\n\nIf an unassigned Coast is already running for this project, prefer assigning\nyour worktree to it rather than creating a new one:\n\n coast assign -w \n\nIf no Coast is running, ask the user before creating one — Coasts can be\nmemory intensive:\n\n coast run -w \n\nA project must be built before instances can be created. 
If `coast run` fails\nbecause no build exists, run `coast build` first.\n\n## Coastfile setup\n\nIf the project does not have a Coastfile yet, or if you need to modify the\nCoastfile, read the Coastfile docs first:\n\n coast docs --path coastfiles/README.md\n\n## When confused\n\nBefore guessing about Coast behavior, explore the docs:\n\n coast docs # list all doc pages\n coast docs --path concepts_and_terminology/RUN.md\n coast docs --path concepts_and_terminology/ASSIGN.md\n coast docs --path concepts_and_terminology/BUILDS.md\n coast search-docs \"your question here\" # semantic search\n\n## Rules\n\n- Always run `coast lookup` before your first runtime command in a session.\n- Use `coast exec` only for things that need the container runtime.\n- Run linting, typechecking, formatting, and git on the host directly.\n- Use `coast docs` or `coast search-docs` before guessing about Coast behavior.\n- Do not run services directly on the host when the project expects Coast.\n```\n\nThis block belongs in the always-on file because the rules should apply on\nevery task, not only when the agent explicitly enters a `/coasts` workflow.\n\n## 2. Reusable `/coasts` skill\n\nWhen the harness supports project skills, save the skill content as a\n`SKILL.md` in your skills directory. The full skill text is in\n[skills_prompt.txt](skills_prompt.txt) (if in CLI mode, use\n`coast skills-prompt`) — everything after the Coast Runtime block is the skill\ncontent, starting from the `---` frontmatter.\n\nIf you are using Codex or OpenAI-specific surfaces, you can optionally add\n`agents/openai.yaml` beside the skill for display metadata or invocation\npolicy. 
That metadata should live beside the skill, not replace it.\n\n## Harness quick start\n\n| Harness | Always-on file | Reusable Coast workflow | Notes |\n|---------|----------------|-------------------------|-------|\n| OpenAI Codex | `AGENTS.md` | `.agents/skills/coasts/SKILL.md` | No separate project command file to recommend for Coast docs. See [Codex](harnesses/CODEX.md). |\n| Claude Code | `CLAUDE.md` | `.claude/skills/coasts/SKILL.md` | `.claude/commands/coasts.md` is optional, but keep the logic in the skill. See [Claude Code](harnesses/CLAUDE_CODE.md). |\n| Cursor | `AGENTS.md` or `.cursor/rules/coast.md` | `.cursor/skills/coasts/SKILL.md` or shared `.agents/skills/coasts/SKILL.md` | `.cursor/commands/coasts.md` is optional. `.cursor/worktrees.json` is for Cursor worktree bootstrap, not Coast policy. See [Cursor](harnesses/CURSOR.md). |\n| Conductor | `CLAUDE.md` | Start with `CLAUDE.md`; use Conductor scripts and settings for Conductor-specific behavior | Do not assume full Claude Code project command behavior. If a new command does not appear, fully close and reopen Conductor. See [Conductor](harnesses/CONDUCTOR.md). |\n| T3 Code | `AGENTS.md` | `.agents/skills/coasts/SKILL.md` | This is the most limited harness surface here. Use the Codex-style layout and do not invent a T3-native command layer for Coast docs. See [T3 Code](harnesses/T3_CODE.md). |\n\n## Let the agent set itself up\n\nThe fastest way is to let the agent write the right files itself. 
Copy the\nprompt below into your agent's chat — it includes the Coast Runtime block, the\n`coasts` skill block, and harness-specific instructions for where each piece\nbelongs.\n\n```prompt-copy\nskills_prompt.txt\n```\n\nYou can also get the same output from the CLI by running `coast skills-prompt`.\n\n## Manual setup\n\n- **Codex:** put the Coast Runtime section in `AGENTS.md`, then put the\n reusable `coasts` skill in `.agents/skills/coasts/SKILL.md`.\n- **Claude Code:** put the Coast Runtime section in `CLAUDE.md`, then put the\n reusable `coasts` skill in `.claude/skills/coasts/SKILL.md`. Only add\n `.claude/commands/coasts.md` if you specifically want a command file.\n- **Cursor:** put the Coast Runtime section in `AGENTS.md` if you want the most\n portable instructions, or in `.cursor/rules/coast.md` if you want a\n Cursor-native project rule. Put the reusable `coasts` workflow in\n `.cursor/skills/coasts/SKILL.md` for a Cursor-only repo, or in\n `.agents/skills/coasts/SKILL.md` if the repo is shared with other harnesses.\n Only add `.cursor/commands/coasts.md` if you specifically want an explicit\n command file.\n- **Conductor:** put the Coast Runtime section in `CLAUDE.md`. Use Conductor\n Repository Settings scripts for Conductor-specific bootstrap or run behavior.\n If you add a command and it does not appear, fully close and reopen the app.\n- **T3 Code:** use the same layout as Codex: `AGENTS.md` plus\n `.agents/skills/coasts/SKILL.md`. Treat T3 Code as a thin Codex-style\n harness here, not as a separate Coast command surface.\n- **Multiple harnesses:** keep the canonical skill in\n `.agents/skills/coasts/SKILL.md`. 
Cursor can load that directly; expose it to\n Claude Code through `.claude/skills/coasts/` if needed.\n\n## Further reading\n\n- Read the [Harnesses guide](harnesses/README.md) for the per-harness matrix\n- Read [Multiple Harnesses](harnesses/MULTIPLE_HARNESSES.md) for the shared\n layout pattern\n- Read the [Coastfiles documentation](coastfiles/README.md) to learn the full\n configuration schema\n- Learn the [Coast CLI](concepts_and_terminology/CLI.md) commands for managing\n instances\n- Explore [Coastguard](concepts_and_terminology/COASTGUARD.md), the web UI for\n observing and controlling your Coasts\n", - "doc_ordering.txt": "# Top-level\nREADME.md\nGETTING_STARTED.md\nSKILLS_FOR_HOST_AGENTS.md\n\n# Learn Coasts\nlearn-coasts-videos/README.md\nlearn-coasts-videos/coasts.md\nlearn-coasts-videos/ports.md\nlearn-coasts-videos/assign.md\nlearn-coasts-videos/checkout.md\nlearn-coasts-videos/volumes.md\nlearn-coasts-videos/secrets.md\nlearn-coasts-videos/getting-started.md\nlearn-coasts-videos/coast-ui.md\n\n# Harnesses\nharnesses/README.md\nharnesses/CODEX.md\nharnesses/CONDUCTOR.md\nharnesses/CLAUDE_CODE.md\nharnesses/CURSOR.md\nharnesses/T3_CODE.md\nharnesses/MULTIPLE_HARNESSES.md\n\n# Concepts and 
Terminology\nconcepts_and_terminology/README.md\nconcepts_and_terminology/COASTS.md\nconcepts_and_terminology/RUN.md\nconcepts_and_terminology/REMOVE.md\nconcepts_and_terminology/FILESYSTEM.md\nconcepts_and_terminology/DAEMON.md\nconcepts_and_terminology/CLI.md\nconcepts_and_terminology/COASTGUARD.md\nconcepts_and_terminology/PORTS.md\nconcepts_and_terminology/PRIMARY_PORT_AND_DNS.md\nconcepts_and_terminology/ASSIGN.md\nconcepts_and_terminology/CHECKOUT.md\nconcepts_and_terminology/LOOKUP.md\nconcepts_and_terminology/VOLUMES.md\nconcepts_and_terminology/SHARED_SERVICES.md\nconcepts_and_terminology/SECRETS.md\nconcepts_and_terminology/BUILDS.md\nconcepts_and_terminology/COASTFILE_TYPES.md\nconcepts_and_terminology/RUNTIMES_AND_SERVICES.md\nconcepts_and_terminology/BARE_SERVICES.md\nconcepts_and_terminology/MIXED_SERVICE_TYPES.md\nconcepts_and_terminology/LOGS.md\nconcepts_and_terminology/EXEC_AND_DOCKER.md\nconcepts_and_terminology/AGENT_SHELLS.md\nconcepts_and_terminology/MCP_SERVERS.md\nconcepts_and_terminology/PERFORMANCE_OPTIMIZATIONS.md\nconcepts_and_terminology/TROUBLESHOOTING.md\n\n# Coastfiles\ncoastfiles/README.md\ncoastfiles/PROJECT.md\ncoastfiles/WORKTREE_DIR.md\ncoastfiles/PORTS.md\ncoastfiles/SHARED_SERVICES.md\ncoastfiles/SERVICES.md\ncoastfiles/SECRETS.md\ncoastfiles/VOLUMES.md\ncoastfiles/ASSIGN.md\ncoastfiles/INHERITANCE.md\ncoastfiles/AGENT_SHELL.md\ncoastfiles/MCP.md\n\n# Recipes\nrecipes/README.md\nrecipes/FULLSTACK_MONOREPO.md\n\n", - "installation_prompt.txt": "You are installing Coasts into this project. Coast (containerized host) is a CLI tool that runs multiple isolated development environments on a single machine using Docker-in-Docker containers. Each environment gets its own ports, volumes, and runtime — ideal for parallel worktree workflows.\n\nYour job: analyze this project and generate a Coastfile (a TOML file named \"Coastfile\" at the project root).\n\n=== DOCUMENTATION ===\n\nCoast has built-in docs accessible from the CLI. 
Use these to understand the full Coastfile schema, volume strategies, assign behavior, and other configuration options before generating a Coastfile.\n\nBrowse the docs tree:\n\n coast docs\n\nThis prints the full docs tree. Start by reading the README files — they provide indexes to help you find the right documentation for each topic:\n\n coast docs --path README.md\n coast docs --path coastfiles/README.md\n coast docs --path concepts_and_terminology/README.md\n\nRead a specific doc:\n\n coast docs --path coastfiles/PROJECT.md\n coast docs --path coastfiles/VOLUMES.md\n\nSearch the docs (semantic search — describe what you're looking for in natural language):\n\n coast search-docs \"how do volume strategies work\"\n coast search-docs \"shared postgres across instances\"\n coast search-docs \"secret injection from environment variables\"\n\nUse the docs to make informed decisions about this project's Coastfile configuration. The coastfiles/ section covers every Coastfile directive in detail.\n\n=== COASTFILE SCHEMA (quick reference) ===\n\n[coast] — Required. Project metadata.\n\n name (string, required) Project identifier used in container/volume naming.\n compose (string, optional) Path to docker-compose.yml relative to the Coastfile.\n runtime (string, optional) \"dind\" (default), \"sysbox\", or \"podman\".\n root (string, optional) Project root override (relative or absolute).\n worktree_dir (string, optional) Directory for git worktrees (default: \".worktrees\"). Auto-detected from existing worktrees at runtime.\n\n[coast.setup] — Optional. Customize the DinD container itself.\n\n packages (array of strings) Alpine packages to install (e.g. [\"nodejs\", \"npm\", \"git\"]).\n run (array of strings) Arbitrary commands to run during setup.\n\n[ports] — Required (at least one). 
Map of logical name to port number.\n These ports are forwarded to the host when the coast is checked out.\n\n Example:\n [ports]\n web = 3000\n api = 8080\n postgres = 5432\n\n[volumes.*] — Optional. Per-volume configuration.\n\n strategy \"isolated\" (default) or \"shared\"\n service Compose service name that owns this volume.\n mount Mount path inside the service container.\n snapshot_source (isolated only) Seed from an existing volume name.\n\n Example:\n [volumes.postgres_data]\n strategy = \"isolated\"\n service = \"db\"\n mount = \"/var/lib/postgresql/data\"\n\n[secrets.*] — Optional. Secret extraction and injection.\n\n extractor \"file\", \"env\", \"command\", or \"macos-keychain\"\n inject \"env:VAR_NAME\" or \"file:/path/in/container\"\n ttl Optional expiry (e.g. \"1h\", \"30m\").\n\n Extractor-specific params:\n file: path = \"./path/to/secret\"\n env: var = \"HOST_ENV_VAR\"\n command: run = \"echo secret-value\"\n macos-keychain: item = \"keychain-item-name\"\n\n Example:\n [secrets.db_password]\n extractor = \"env\"\n var = \"DB_PASSWORD\"\n inject = \"env:DATABASE_PASSWORD\"\n\n[inject] — Optional. Non-secret host file/env injection.\n\n env Array of host env var names to forward.\n files Array of host file paths to mount.\n\n Example:\n [inject]\n env = [\"NODE_ENV\", \"DEBUG\"]\n files = [\"~/.ssh/id_ed25519\", \"~/.gitconfig\"]\n\n[shared_services.*] — Optional. 
Services on the host Docker daemon shared across instances.\n\n image Docker image.\n ports Array of port numbers.\n volumes Array of volume mounts.\n env Inline table of environment variables.\n auto_create_db (bool) Create a per-instance database automatically.\n inject Inject connection string into coast containers.\n\n Example:\n [shared_services.postgres]\n image = \"postgres:16-alpine\"\n ports = [5432]\n volumes = [\"postgres_data:/var/lib/postgresql/data\"]\n env = { POSTGRES_USER = \"dev\", POSTGRES_PASSWORD = \"dev\", POSTGRES_DB = \"app\" }\n auto_create_db = true\n inject = \"env:DATABASE_URL\"\n\n[assign] — Optional. Controls what happens on branch switch (coast assign).\n\n default \"none\", \"restart\", or \"rebuild\"\n [assign.services] Per-service overrides.\n [assign.rebuild_triggers] Per-service file globs that trigger rebuild.\n\n Example:\n [assign]\n default = \"none\"\n [assign.services]\n api = \"restart\"\n worker = \"rebuild\"\n [assign.rebuild_triggers]\n worker = [\"Dockerfile\", \"package.json\"]\n\n[services.*] — Optional. 
Bare process services (no docker-compose needed).\n\n command Shell command to run.\n port Port number.\n restart \"on-failure\" or \"always\".\n\n Example:\n [services.web]\n command = \"node server.js\"\n port = 3000\n restart = \"on-failure\"\n\n=== EXAMPLE: Minimal (no compose) ===\n\n[coast]\nname = \"my-app\"\nruntime = \"dind\"\n\n[coast.setup]\npackages = [\"nodejs\", \"npm\"]\n\n[ports]\napp = 3000\n\n=== EXAMPLE: With docker-compose ===\n\n[coast]\nname = \"my-app\"\ncompose = \"./docker-compose.yml\"\nruntime = \"dind\"\n\n[ports]\napp = 3000\npostgres = 5432\nredis = 6379\n\n[volumes.postgres_data]\nstrategy = \"shared\"\nservice = \"db\"\nmount = \"/var/lib/postgresql/data\"\n\n[volumes.redis_data]\nstrategy = \"isolated\"\nservice = \"cache\"\nmount = \"/data\"\n\n[assign]\ndefault = \"none\"\n\n[assign.services]\napp = \"rebuild\"\n\n=== EXAMPLE: With secrets ===\n\n[coast]\nname = \"my-app\"\ncompose = \"./docker-compose.yml\"\nruntime = \"dind\"\n\n[ports]\napp = 3000\n\n[secrets.api_key]\nextractor = \"env\"\nvar = \"API_KEY\"\ninject = \"env:API_KEY\"\n\n[secrets.ssh_key]\nextractor = \"file\"\npath = \"~/.ssh/id_ed25519\"\ninject = \"file:/run/secrets/ssh_key\"\n\n=== KEY TRADEOFFS TO DISCUSS WITH THE USER ===\n\nBefore generating the Coastfile, ask the user about any ambiguous configuration choices. Here are the main ones:\n\nDatabase and infrastructure strategy — there are three options for services like postgres and redis:\n - Isolated volumes (default): each Coast instance gets its own copy of the data inside its DinD container. Instances cannot interfere with each other. Best when you want per-branch database state.\n - Shared volumes: all instances read and write the same volume inside their DinD containers. Saves disk space but concurrent writes from multiple instances can corrupt data.\n - Shared services: run the database on the host Docker daemon instead of inside each Coast. All instances connect to one shared server. 
Uses the least memory, supports auto_create_db for per-instance databases on a single postgres, and data outlives instance deletion. Best for large teams or memory-constrained machines.\n - If the project has a database, ask the user which approach they want. Explain the tradeoffs — isolated is safest, shared services is most memory-efficient.\n\nAssign strategy — what happens when switching a Coast between worktrees:\n - \"none\": do nothing (for services like postgres/redis that don't change between branches).\n - \"restart\": restart the container (for interpreted services that just need a process restart).\n - \"rebuild\": rebuild the Docker image and restart (for services where the branch change affects the Dockerfile or build dependencies).\n - If the project has multiple services, ask which ones need rebuilding vs restarting on branch switch.\n\n=== INSTRUCTIONS ===\n\n1. Look at this project's structure. If there is a docker-compose.yml, read it to identify services, ports, and volumes.\n2. Detect the existing git worktree directory. Run `git worktree list` to check if the project already has git worktrees set up.\n - If worktrees exist, examine their paths to determine the common parent directory (e.g., if worktrees are at `../.worktrees/feat-a` and `../.worktrees/feat-b`, the worktree_dir is `\"../.worktrees\"`).\n - Set `worktree_dir` in the Coastfile to match the detected directory.\n - If no worktrees exist, omit `worktree_dir` (Coast defaults to \".worktrees\"). Do NOT use \".coasts\" — that pollutes the project with a Coast-branded directory.\n3. Read the relevant Coast docs (use `coast docs` and `coast search-docs`) to understand volume strategies, assign behavior, and any configuration options that apply to this project's stack.\n4. Ask the user about any ambiguous configuration choices (see tradeoffs above). Do not guess — explain the options and let them decide.\n5. 
Generate a Coastfile at the project root based on the project analysis and user input.\n6. If the project has no docker-compose.yml, use [services.*] for bare process definitions or [coast.setup] to install dependencies.\n7. Run `coast build`. If it fails, check the error and consult the docs (`coast search-docs \"\"`) to troubleshoot.\n8. Run `coast run dev-1`. If it fails, check the error and consult the docs.\n9. Run `coast ui` to open the Coastguard dashboard (this is for the user when you are done).\n", - "skills_prompt.txt": "# Coast Runtime\n\nThis project uses Coasts — containerized runtimes for running services, tests,\nand other runtime commands. The filesystem is shared between the host and the\ncontainer, so file edits on either side are visible to both immediately.\n\n## Discovery\n\nBefore the first runtime command in a session, run:\n\n coast lookup\n\nThis prints the instance name, ports, and example commands. Use the instance\nname from the output for all subsequent commands.\n\n## What runs where\n\nThe filesystem is shared, so only use `coast exec` for things that need the\ncontainer runtime (databases, services, integration tests). 
Everything else\nruns directly on the host.\n\nUse `coast exec` for:\n- Tests that need running services (unit tests that are integrated with services or dbs, integration tests, API tests)\n- Service restarts or compose operations\n- Anything that talks to databases, caches, or other container services\n\nRun directly on the host:\n- Linting, typechecking, formatting\n- Git operations\n- Playwright and browser tests\n- Installing host-side dependencies (npm install, pip install)\n- File search, code generation, static analysis\n\nExample:\n\n coast exec -- sh -c \"cd && npm test\" # needs DB\n npm run lint # host is fine\n npx playwright test # host is fine\n\n## Runtime feedback\n\n coast ps \n coast logs --service \n coast logs --service --tail 50\n\n## Creating and assigning Coasts\n\nIf `coast lookup` returns no match, run `coast ls` to see what exists.\n\nIf an unassigned Coast is already running for this project, prefer assigning\nyour worktree to it rather than creating a new one:\n\n coast assign -w \n\nAn already occupied Coast can also be reassigned with `coast assign`, but check\nwith the user first because that will disrupt the current slot.\n\nIf no Coast is running, ask the user before creating one — Coasts can be\nmemory intensive:\n\n coast run -w \n\nA project must be built before instances can be created. 
If `coast run` fails\nbecause no build exists, run `coast build` first.\n\n## Coastfile setup\n\nIf the project does not have a Coastfile yet, or if you need to modify the\nCoastfile, read the Coastfile docs first:\n\n coast docs --path coastfiles/README.md\n\n## When confused\n\nBefore guessing about Coast behavior, explore the docs:\n\n coast docs # list all doc pages\n coast docs --path concepts_and_terminology/RUN.md\n coast docs --path concepts_and_terminology/ASSIGN.md\n coast docs --path concepts_and_terminology/BUILDS.md\n coast search-docs \"your question here\" # semantic search\n\n## Rules\n\n- Always run `coast lookup` before your first runtime command in a session.\n- Use `coast exec` only for things that need the container runtime.\n- Run linting, typechecking, formatting, and git on the host directly.\n- Use `coast docs` or `coast search-docs` before guessing about Coast behavior.\n- Do not run services directly on the host when the project expects Coast.\n\n---\nname: coasts\ndescription: Inspect and control Coast instances for the current checkout. Use\n when the user says \"/coasts\", asks to assign or reassign a Coast, wants to\n run commands or read logs in the matching Coast, wants to create a new Coast,\n or explicitly asks to open Coast UI.\n---\n\n# Coasts\n\nUse the Coast CLI directly. Do not add wrappers.\n\n## Orient Yourself\n\nStart by exploring the CLI and docs:\n\n coast # see all available commands\n coast docs # list all doc pages\n coast search-docs \"your question\" # semantic search\n\nWhen anything about Coast behavior is unclear, read the docs before guessing:\n\n coast docs --path concepts_and_terminology/RUN.md\n coast docs --path concepts_and_terminology/BUILDS.md\n coast docs --path concepts_and_terminology/ASSIGN.md\n coast docs --path concepts_and_terminology/PORTS.md\n coast docs --path coastfiles/README.md\n\n## Quick Start\n\nRoute requests into one of these modes:\n\n1. 
**Use Coast** — run `coast lookup`, then use `coast exec`, `coast ps`,\n or `coast logs` with the matching instance.\n2. **Create or Assign** — run `coast ls`, then `coast run` to create a new\n Coast or `coast assign` to repoint an existing one.\n3. **Open UI** — run `coast ui`.\n\n## What Runs Where\n\nThe host and the Coast share the filesystem. Only use `coast exec` for things\nthat need running services inside the container.\n\n**Use `coast exec` for:**\n- Integration tests, API tests, anything that needs databases or services\n- Service restarts, compose operations\n- Commands that talk to container-only processes\n\n**Run on the host:**\n- Linting (`eslint`, `rubocop`, `golangci-lint`)\n- Typechecking (`tsc --noEmit`, `go vet`)\n- Formatting (`prettier`, `gofmt`)\n- Git operations\n- Playwright and browser tests\n- Static analysis, code generation\n- Package installs (`npm install`, `pip install`)\n\n## Create and Assign\n\nWhen `coast lookup` returns no match:\n\n1. Run `coast ls` to see available slots.\n2. Prefer `coast run -w ` to create and assign in one step.\n3. If no build exists yet, run `coast build` first.\n4. 
After creating, rerun `coast lookup` to confirm.\n\nWhen you want to switch an existing Coast to a different worktree:\n\n coast assign -w \n\nThat also works for an already assigned or checked-out Coast, but ask the user\nfirst before reassigning an occupied slot.\n\n## Coastfile Setup\n\nIf the project needs a new or modified Coastfile, read the docs first:\n\n coast docs --path coastfiles/README.md\n\nThe Coastfile docs cover compose setup, ports, volumes, secrets, shared\nservices, bare services, and inheritance.\n\n## Safety Rules\n\n- Run `coast lookup` before taking action and again after any topology change.\n- Ask before `coast assign`, `coast unassign`, or `coast checkout` if it would\n disrupt an existing slot.\n- Prefer creating a new Coast over reusing a checked-out or already-assigned\n one unless the user explicitly wants the existing slot to be reassigned.\n- Use `coast docs` or `coast search-docs` before guessing.\n\n", + "SKILLS_FOR_HOST_AGENTS.md": "# Skills for Host Agents\n\nIf you use AI coding agents on the host while your app runs inside Coasts, your\nagent usually needs two Coast-specific pieces of setup:\n\n1. an always-on Coast Runtime section in the harness's project instruction file\n or rule file\n2. a reusable Coast workflow skill such as `/coasts` when the harness supports\n project skills\n\nWithout the first piece, the agent edits files but forgets to use `coast exec`.\nWithout the second, every Coast assignment, log, and UI flow has to be\nre-explained in chat.\n\nThis guide keeps the setup concrete and Coast-specific: which file to create,\nwhat text goes in it, and how that changes by harness.\n\n## Why agents need this\n\nCoasts share the [filesystem](concepts_and_terminology/FILESYSTEM.md) between\nyour host machine and the Coast container. Your agent edits files on the host\nand the running services inside the Coast see the changes immediately. But the\nagent still needs to:\n\n1. 
discover which Coast instance matches the current checkout\n2. run tests, builds, and runtime commands inside that Coast\n3. read logs and service status from the Coast\n4. handle worktree assignment safely when no Coast is already attached\n\n## What goes where\n\n- `AGENTS.md`, `CLAUDE.md`, or `.cursor/rules/coast.md` — short Coast rules\n that should apply on every task, even if no skill is invoked\n- skill (`.agents/skills/...`, `.claude/skills/...`, or `.cursor/skills/...`)\n — the reusable Coast workflow itself, such as `/coasts`\n- command file (`.claude/commands/...` or `.cursor/commands/...`) — optional\n explicit entrypoint for harnesses that support it; one simple option is to\n have the command reuse the skill\n\nIf one repo uses more than one harness, keep the canonical Coast skill in one\nplace and expose it where needed. See\n[Multiple Harnesses](harnesses/MULTIPLE_HARNESSES.md).\n\n## 1. Always-on Coast Runtime rules\n\nAdd the following block to the harness's always-on project instruction file or\nrule file (`AGENTS.md`, `CLAUDE.md`, `.cursor/rules/coast.md`, or equivalent):\n\n```text-copy\n# Coast Runtime\n\nThis project uses Coasts — containerized runtimes for running services, tests,\nand other runtime commands. The filesystem is shared between the host and the\ncontainer, so file edits on either side are visible to both immediately.\n\n## Discovery\n\nBefore the first runtime command in a session, run:\n\n coast lookup\n\nThis prints the instance name, ports, and example commands. Use the instance\nname from the output for all subsequent commands.\n\n## What runs where\n\nThe filesystem is shared, so only use `coast exec` for things that need the\ncontainer runtime (databases, services, integration tests). 
Everything else\nruns directly on the host.\n\nUse `coast exec` for:\n- Tests that need running services (integration tests, API tests)\n- Service restarts or compose operations\n- Anything that talks to databases, caches, or other container services\n\nRun directly on the host:\n- Linting, typechecking, formatting\n- Git operations\n- Playwright and browser tests\n- Installing host-side dependencies (npm install, pip install)\n- File search, code generation, static analysis\n\nExample:\n\n coast exec -- sh -c \"cd && npm test\" # needs DB\n coast exec --service # service shell\n npm run lint # host is fine\n npx playwright test # host is fine\n\n## Runtime feedback\n\n coast ps \n coast logs --service \n coast logs --service --tail 50\n\n## Creating and assigning Coasts\n\nIf `coast lookup` returns no match, run `coast ls` to see what exists.\n\nIf an unassigned Coast is already running for this project, prefer assigning\nyour worktree to it rather than creating a new one:\n\n coast assign -w \n\nIf no Coast is running, ask the user before creating one — Coasts can be\nmemory intensive:\n\n coast run -w \n\nA project must be built before instances can be created. 
If `coast run` fails\nbecause no build exists, run `coast build` first.\n\n## Coastfile setup\n\nIf the project does not have a Coastfile yet, or if you need to modify the\nCoastfile, read the Coastfile docs first:\n\n coast docs --path coastfiles/README.md\n\n## When confused\n\nBefore guessing about Coast behavior, explore the docs:\n\n coast docs # list all doc pages\n coast docs --path concepts_and_terminology/RUN.md\n coast docs --path concepts_and_terminology/ASSIGN.md\n coast docs --path concepts_and_terminology/BUILDS.md\n coast search-docs \"your question here\" # semantic search\n\n## Rules\n\n- Always run `coast lookup` before your first runtime command in a session.\n- Use `coast exec` only for things that need the container runtime.\n- Use `coast exec --service ` when you need to run inside an app/service container.\n- Run linting, typechecking, formatting, and git on the host directly.\n- Use `coast docs` or `coast search-docs` before guessing about Coast behavior.\n- Do not run services directly on the host when the project expects Coast.\n```\n\nThis block belongs in the always-on file because the rules should apply on\nevery task, not only when the agent explicitly enters a `/coasts` workflow.\n\n## 2. Reusable `/coasts` skill\n\nWhen the harness supports project skills, save the skill content as a\n`SKILL.md` in your skills directory. The full skill text is in\n[skills_prompt.txt](skills_prompt.txt) (if in CLI mode, use\n`coast skills-prompt`) — everything after the Coast Runtime block is the skill\ncontent, starting from the `---` frontmatter.\n\nIf you are using Codex or OpenAI-specific surfaces, you can optionally add\n`agents/openai.yaml` beside the skill for display metadata or invocation\npolicy. 
That metadata should live beside the skill, not replace it.\n\n## Harness quick start\n\n| Harness | Always-on file | Reusable Coast workflow | Notes |\n|---------|----------------|-------------------------|-------|\n| OpenAI Codex | `AGENTS.md` | `.agents/skills/coasts/SKILL.md` | No separate project command file to recommend for Coast docs. See [Codex](harnesses/CODEX.md). |\n| Claude Code | `CLAUDE.md` | `.claude/skills/coasts/SKILL.md` | `.claude/commands/coasts.md` is optional, but keep the logic in the skill. See [Claude Code](harnesses/CLAUDE_CODE.md). |\n| Cursor | `AGENTS.md` or `.cursor/rules/coast.md` | `.cursor/skills/coasts/SKILL.md` or shared `.agents/skills/coasts/SKILL.md` | `.cursor/commands/coasts.md` is optional. `.cursor/worktrees.json` is for Cursor worktree bootstrap, not Coast policy. See [Cursor](harnesses/CURSOR.md). |\n| Conductor | `CLAUDE.md` | Start with `CLAUDE.md`; use Conductor scripts and settings for Conductor-specific behavior | Do not assume full Claude Code project command behavior. If a new command does not appear, fully close and reopen Conductor. See [Conductor](harnesses/CONDUCTOR.md). |\n| T3 Code | `AGENTS.md` | `.agents/skills/coasts/SKILL.md` | This is the most limited harness surface here. Use the Codex-style layout and do not invent a T3-native command layer for Coast docs. See [T3 Code](harnesses/T3_CODE.md). |\n\n## Let the agent set itself up\n\nThe fastest way is to let the agent write the right files itself. 
Copy the\nprompt below into your agent's chat — it includes the Coast Runtime block, the\n`coasts` skill block, and harness-specific instructions for where each piece\nbelongs.\n\n```prompt-copy\nskills_prompt.txt\n```\n\nYou can also get the same output from the CLI by running `coast skills-prompt`.\n\n## Manual setup\n\n- **Codex:** put the Coast Runtime section in `AGENTS.md`, then put the\n reusable `coasts` skill in `.agents/skills/coasts/SKILL.md`.\n- **Claude Code:** put the Coast Runtime section in `CLAUDE.md`, then put the\n reusable `coasts` skill in `.claude/skills/coasts/SKILL.md`. Only add\n `.claude/commands/coasts.md` if you specifically want a command file.\n- **Cursor:** put the Coast Runtime section in `AGENTS.md` if you want the most\n portable instructions, or in `.cursor/rules/coast.md` if you want a\n Cursor-native project rule. Put the reusable `coasts` workflow in\n `.cursor/skills/coasts/SKILL.md` for a Cursor-only repo, or in\n `.agents/skills/coasts/SKILL.md` if the repo is shared with other harnesses.\n Only add `.cursor/commands/coasts.md` if you specifically want an explicit\n command file.\n- **Conductor:** put the Coast Runtime section in `CLAUDE.md`. Use Conductor\n Repository Settings scripts for Conductor-specific bootstrap or run behavior.\n If you add a command and it does not appear, fully close and reopen the app.\n- **T3 Code:** use the same layout as Codex: `AGENTS.md` plus\n `.agents/skills/coasts/SKILL.md`. Treat T3 Code as a thin Codex-style\n harness here, not as a separate Coast command surface.\n- **Multiple harnesses:** keep the canonical skill in\n `.agents/skills/coasts/SKILL.md`. 
Cursor can load that directly; expose it to\n Claude Code through `.claude/skills/coasts/` if needed.\n\n## Further reading\n\n- Read the [Harnesses guide](harnesses/README.md) for the per-harness matrix\n- Read [Multiple Harnesses](harnesses/MULTIPLE_HARNESSES.md) for the shared\n layout pattern\n- Read the [Coastfiles documentation](coastfiles/README.md) to learn the full\n configuration schema\n- Learn the [Coast CLI](concepts_and_terminology/CLI.md) commands for managing\n instances\n- Explore [Coastguard](concepts_and_terminology/COASTGUARD.md), the web UI for\n observing and controlling your Coasts\n", + "doc_ordering.txt": "# Top-level\nREADME.md\nGETTING_STARTED.md\nSKILLS_FOR_HOST_AGENTS.md\n\n# Learn Coasts\nlearn-coasts-videos/README.md\nlearn-coasts-videos/coasts.md\nlearn-coasts-videos/ports.md\nlearn-coasts-videos/assign.md\nlearn-coasts-videos/checkout.md\nlearn-coasts-videos/volumes.md\nlearn-coasts-videos/secrets.md\nlearn-coasts-videos/getting-started.md\nlearn-coasts-videos/coast-ui.md\n\n# Harnesses\nharnesses/README.md\nharnesses/CODEX.md\nharnesses/CONDUCTOR.md\nharnesses/CLAUDE_CODE.md\nharnesses/CURSOR.md\nharnesses/T3_CODE.md\nharnesses/SHEP.md\nharnesses/MULTIPLE_HARNESSES.md\n\n# Concepts and 
Terminology\nconcepts_and_terminology/README.md\nconcepts_and_terminology/COASTS.md\nconcepts_and_terminology/RUN.md\nconcepts_and_terminology/REMOVE.md\nconcepts_and_terminology/FILESYSTEM.md\nconcepts_and_terminology/DAEMON.md\nconcepts_and_terminology/CLI.md\nconcepts_and_terminology/COASTGUARD.md\nconcepts_and_terminology/PORTS.md\nconcepts_and_terminology/PRIMARY_PORT_AND_DNS.md\nconcepts_and_terminology/ASSIGN.md\nconcepts_and_terminology/CHECKOUT.md\nconcepts_and_terminology/LOOKUP.md\nconcepts_and_terminology/VOLUMES.md\nconcepts_and_terminology/SHARED_SERVICES.md\nconcepts_and_terminology/SECRETS.md\nconcepts_and_terminology/BUILDS.md\nconcepts_and_terminology/COASTFILE_TYPES.md\nconcepts_and_terminology/RUNTIMES_AND_SERVICES.md\nconcepts_and_terminology/BARE_SERVICES.md\nconcepts_and_terminology/MIXED_SERVICE_TYPES.md\nconcepts_and_terminology/LOGS.md\nconcepts_and_terminology/EXEC_AND_DOCKER.md\nconcepts_and_terminology/AGENT_SHELLS.md\nconcepts_and_terminology/MCP_SERVERS.md\nconcepts_and_terminology/PERFORMANCE_OPTIMIZATIONS.md\nconcepts_and_terminology/TROUBLESHOOTING.md\n\n# Coastfiles\ncoastfiles/README.md\ncoastfiles/PROJECT.md\ncoastfiles/WORKTREE_DIR.md\ncoastfiles/PORTS.md\ncoastfiles/SHARED_SERVICES.md\ncoastfiles/SERVICES.md\ncoastfiles/SECRETS.md\ncoastfiles/VOLUMES.md\ncoastfiles/ASSIGN.md\ncoastfiles/INHERITANCE.md\ncoastfiles/AGENT_SHELL.md\ncoastfiles/MCP.md\n\n# Recipes\nrecipes/README.md\nrecipes/FULLSTACK_MONOREPO.md\n\n", + "installation_prompt.txt": "You are installing Coasts into this project. Coast (containerized host) is a CLI tool that runs multiple isolated development environments on a single machine using Docker-in-Docker containers. Each environment gets its own ports, volumes, and runtime — ideal for parallel worktree workflows.\n\nYour job: analyze this project and generate a Coastfile (a TOML file named \"Coastfile\" at the project root).\n\n=== DOCUMENTATION ===\n\nCoast has built-in docs accessible from the CLI. 
Use these to understand the full Coastfile schema, volume strategies, assign behavior, and other configuration options before generating a Coastfile.\n\nBrowse the docs tree:\n\n coast docs\n\nThis prints the full docs tree. Start by reading the README files — they provide indexes to help you find the right documentation for each topic:\n\n coast docs --path README.md\n coast docs --path coastfiles/README.md\n coast docs --path concepts_and_terminology/README.md\n\nRead a specific doc:\n\n coast docs --path coastfiles/PROJECT.md\n coast docs --path coastfiles/VOLUMES.md\n\nSearch the docs (semantic search — describe what you're looking for in natural language):\n\n coast search-docs \"how do volume strategies work\"\n coast search-docs \"shared postgres across instances\"\n coast search-docs \"secret injection from environment variables\"\n\nUse the docs to make informed decisions about this project's Coastfile configuration. The coastfiles/ section covers every Coastfile directive in detail.\n\n=== COASTFILE SCHEMA (quick reference) ===\n\n[coast] — Required. Project metadata.\n\n name (string, required) Project identifier used in container/volume naming.\n compose (string, optional) Path to docker-compose.yml relative to the Coastfile.\n runtime (string, optional) \"dind\" (default), \"sysbox\", or \"podman\".\n root (string, optional) Project root override (relative or absolute).\n worktree_dir (string or array, optional) Directory or directories for git worktrees (default: \".worktrees\"). Accepts a single string or an array of strings. Auto-detected from existing worktrees at runtime.\n\n[coast.setup] — Optional. Customize the DinD container itself.\n\n packages (array of strings) Alpine packages to install (e.g. [\"nodejs\", \"npm\", \"git\"]).\n run (array of strings) Arbitrary commands to run during setup.\n\n[ports] — Required (at least one). 
Map of logical name to port number.\n These ports are forwarded to the host when the coast is checked out.\n\n Example:\n [ports]\n web = 3000\n api = 8080\n postgres = 5432\n\n[volumes.*] — Optional. Per-volume configuration.\n\n strategy \"isolated\" (default) or \"shared\"\n service Compose service name that owns this volume.\n mount Mount path inside the service container.\n snapshot_source (isolated only) Seed from an existing volume name.\n\n Example:\n [volumes.postgres_data]\n strategy = \"isolated\"\n service = \"db\"\n mount = \"/var/lib/postgresql/data\"\n\n[secrets.*] — Optional. Secret extraction and injection.\n\n extractor \"file\", \"env\", \"command\", or \"macos-keychain\"\n inject \"env:VAR_NAME\" or \"file:/path/in/container\"\n ttl Optional expiry (e.g. \"1h\", \"30m\").\n\n Extractor-specific params:\n file: path = \"./path/to/secret\"\n env: var = \"HOST_ENV_VAR\"\n command: run = \"echo secret-value\"\n macos-keychain: item = \"keychain-item-name\"\n\n Example:\n [secrets.db_password]\n extractor = \"env\"\n var = \"DB_PASSWORD\"\n inject = \"env:DATABASE_PASSWORD\"\n\n[inject] — Optional. Non-secret host file/env injection.\n\n env Array of host env var names to forward.\n files Array of host file paths to mount.\n\n Example:\n [inject]\n env = [\"NODE_ENV\", \"DEBUG\"]\n files = [\"~/.ssh/id_ed25519\", \"~/.gitconfig\"]\n\n[shared_services.*] — Optional. 
Services on the host Docker daemon shared across instances.\n\n image Docker image.\n ports Array of port numbers.\n volumes Array of volume mounts.\n env Inline table of environment variables.\n auto_create_db (bool) Create a per-instance database automatically.\n inject Inject connection string into coast containers.\n\n Example:\n [shared_services.postgres]\n image = \"postgres:16-alpine\"\n ports = [5432]\n volumes = [\"postgres_data:/var/lib/postgresql/data\"]\n env = { POSTGRES_USER = \"dev\", POSTGRES_PASSWORD = \"dev\", POSTGRES_DB = \"app\" }\n auto_create_db = true\n inject = \"env:DATABASE_URL\"\n\n[assign] — Optional. Controls what happens on branch switch (coast assign).\n\n default \"none\", \"restart\", or \"rebuild\"\n [assign.services] Per-service overrides.\n [assign.rebuild_triggers] Per-service file globs that trigger rebuild.\n\n Example:\n [assign]\n default = \"none\"\n [assign.services]\n api = \"restart\"\n worker = \"rebuild\"\n [assign.rebuild_triggers]\n worker = [\"Dockerfile\", \"package.json\"]\n\n[services.*] — Optional. 
Bare process services (no docker-compose needed).\n\n command Shell command to run.\n port Port number.\n restart \"on-failure\" or \"always\".\n\n Example:\n [services.web]\n command = \"node server.js\"\n port = 3000\n restart = \"on-failure\"\n\n=== EXAMPLE: Minimal (no compose) ===\n\n[coast]\nname = \"my-app\"\nruntime = \"dind\"\n\n[coast.setup]\npackages = [\"nodejs\", \"npm\"]\n\n[ports]\napp = 3000\n\n=== EXAMPLE: With docker-compose ===\n\n[coast]\nname = \"my-app\"\ncompose = \"./docker-compose.yml\"\nruntime = \"dind\"\n\n[ports]\napp = 3000\npostgres = 5432\nredis = 6379\n\n[volumes.postgres_data]\nstrategy = \"shared\"\nservice = \"db\"\nmount = \"/var/lib/postgresql/data\"\n\n[volumes.redis_data]\nstrategy = \"isolated\"\nservice = \"cache\"\nmount = \"/data\"\n\n[assign]\ndefault = \"none\"\n\n[assign.services]\napp = \"rebuild\"\n\n=== EXAMPLE: With secrets ===\n\n[coast]\nname = \"my-app\"\ncompose = \"./docker-compose.yml\"\nruntime = \"dind\"\n\n[ports]\napp = 3000\n\n[secrets.api_key]\nextractor = \"env\"\nvar = \"API_KEY\"\ninject = \"env:API_KEY\"\n\n[secrets.ssh_key]\nextractor = \"file\"\npath = \"~/.ssh/id_ed25519\"\ninject = \"file:/run/secrets/ssh_key\"\n\n=== KEY TRADEOFFS TO DISCUSS WITH THE USER ===\n\nBefore generating the Coastfile, ask the user about any ambiguous configuration choices. Here are the main ones:\n\nDatabase and infrastructure strategy — there are three options for services like postgres and redis:\n - Isolated volumes (default): each Coast instance gets its own copy of the data inside its DinD container. Instances cannot interfere with each other. Best when you want per-branch database state.\n - Shared volumes: all instances read and write the same volume inside their DinD containers. Saves disk space but concurrent writes from multiple instances can corrupt data.\n - Shared services: run the database on the host Docker daemon instead of inside each Coast. All instances connect to one shared server. 
Uses the least memory, supports auto_create_db for per-instance databases on a single postgres, and data outlives instance deletion. Best for large teams or memory-constrained machines.\n - If the project has a database, ask the user which approach they want. Explain the tradeoffs — isolated is safest, shared services is most memory-efficient.\n\nAssign strategy — what happens when switching a Coast between worktrees:\n - \"none\": do nothing (for services like postgres/redis that don't change between branches).\n - \"restart\": restart the container (for interpreted services that just need a process restart).\n - \"rebuild\": rebuild the Docker image and restart (for services where the branch change affects the Dockerfile or build dependencies).\n - If the project has multiple services, ask which ones need rebuilding vs restarting on branch switch.\n\n=== INSTRUCTIONS ===\n\n1. Look at this project's structure. If there is a docker-compose.yml, read it to identify services, ports, and volumes.\n2. Detect the existing git worktree directory. Run `git worktree list` to check if the project already has git worktrees set up.\n - If worktrees exist, examine their paths to determine the common parent directory (e.g., if worktrees are at `../.worktrees/feat-a` and `../.worktrees/feat-b`, the worktree_dir is `\"../.worktrees\"`).\n - Set `worktree_dir` in the Coastfile to match the detected directory.\n - If no worktrees exist, omit `worktree_dir` (Coast defaults to \".worktrees\"). Do NOT use \".coasts\" — that pollutes the project with a Coast-branded directory.\n3. 
Ask the user if they use any of these coding harnesses with this project:\n - **Claude Code** — worktrees at `.claude/worktrees`\n - **OpenAI Codex** — worktrees at `~/.codex/worktrees`\n - **Cursor** — worktrees at `~/.cursor/worktrees/<name>` (where `<name>` is the coast name from `[coast] name`)\n - **Conductor** — worktrees at `~/conductor/workspaces/`\n - **T3 Code** — worktrees at `~/.t3/worktrees/`\n For each harness the user selects, include its worktree directory in the `worktree_dir` array. Combine these with any directory detected in step 2. If the user selects none and no worktrees were detected in step 2, omit `worktree_dir` (Coast defaults to \".worktrees\").\n4. Read the relevant Coast docs (use `coast docs` and `coast search-docs`) to understand volume strategies, assign behavior, and any configuration options that apply to this project's stack.\n5. Ask the user about any ambiguous configuration choices (see tradeoffs above). Do not guess — explain the options and let them decide.\n6. Generate a Coastfile at the project root based on the project analysis and user input.\n7. If the project has no docker-compose.yml, use [services.*] for bare process definitions or [coast.setup] to install dependencies.\n8. Run `coast build`. If it fails, check the error and consult the docs (`coast search-docs \"<error message>\"`) to troubleshoot.\n9. Run `coast run dev-1`. If it fails, check the error and consult the docs.\n10. Run `coast ui` to open the Coastguard dashboard (this is for the user when you are done).\n", + "skills_prompt.txt": "# Coast Runtime\n\nThis project uses Coasts — containerized runtimes for running services, tests,\nand other runtime commands. The filesystem is shared between the host and the\ncontainer, so file edits on either side are visible to both immediately.\n\n## Discovery\n\nBefore the first runtime command in a session, run:\n\n coast lookup\n\nThis prints the instance name, ports, and example commands. 
Use the instance\nname from the output for all subsequent commands.\n\n## What runs where\n\nThe filesystem is shared, so only use `coast exec` for things that need the\ncontainer runtime (databases, services, integration tests). Everything else\nruns directly on the host.\n\nUse `coast exec` for:\n- Tests that need running services (unit tests that are integrated with services or dbs, integration tests, API tests)\n- Service restarts or compose operations\n- Anything that talks to databases, caches, or other container services\n\nRun directly on the host:\n- Linting, typechecking, formatting\n- Git operations\n- Playwright and browser tests\n- Installing host-side dependencies (npm install, pip install)\n- File search, code generation, static analysis\n\nExample:\n\n coast exec <instance> -- sh -c \"cd <dir> && npm test\" # needs DB\n coast exec <instance> --service <service> # service shell\n npm run lint # host is fine\n npx playwright test # host is fine\n\n## Runtime feedback\n\n coast ps <instance>\n coast logs <instance> --service <service>\n coast logs <instance> --service <service> --tail 50\n\n## Creating and assigning Coasts\n\nIf `coast lookup` returns no match, run `coast ls` to see what exists.\n\nIf an unassigned Coast is already running for this project, prefer assigning\nyour worktree to it rather than creating a new one:\n\n coast assign <instance> -w <worktree-path>\n\nAn already occupied Coast can also be reassigned with `coast assign`, but check\nwith the user first because that will disrupt the current slot.\n\nIf no Coast is running, ask the user before creating one — Coasts can be\nmemory intensive:\n\n coast run <instance> -w <worktree-path>\n\nA project must be built before instances can be created. 
If `coast run` fails\nbecause no build exists, run `coast build` first.\n\n## Coastfile setup\n\nIf the project does not have a Coastfile yet, or if you need to modify the\nCoastfile, read the Coastfile docs first:\n\n coast docs --path coastfiles/README.md\n\n## When confused\n\nBefore guessing about Coast behavior, explore the docs:\n\n coast docs # list all doc pages\n coast docs --path concepts_and_terminology/RUN.md\n coast docs --path concepts_and_terminology/ASSIGN.md\n coast docs --path concepts_and_terminology/BUILDS.md\n coast search-docs \"your question here\" # semantic search\n\n## Rules\n\n- Always run `coast lookup` before your first runtime command in a session.\n- Use `coast exec` only for things that need the container runtime.\n- Use `coast exec --service ` when you need to run inside an app/service container.\n- Run linting, typechecking, formatting, and git on the host directly.\n- Use `coast docs` or `coast search-docs` before guessing about Coast behavior.\n- Do not run services directly on the host when the project expects Coast.\n\n---\nname: coasts\ndescription: Inspect and control Coast instances for the current checkout. Use\n when the user says \"/coasts\", asks to assign or reassign a Coast, wants to\n run commands or read logs in the matching Coast, wants to create a new Coast,\n or explicitly asks to open Coast UI.\n---\n\n# Coasts\n\nUse the Coast CLI directly. 
Do not add wrappers.\n\n## Orient Yourself\n\nStart by exploring the CLI and docs:\n\n coast # see all available commands\n coast docs # list all doc pages\n coast search-docs \"your question\" # semantic search\n\nWhen anything about Coast behavior is unclear, read the docs before guessing:\n\n coast docs --path concepts_and_terminology/RUN.md\n coast docs --path concepts_and_terminology/BUILDS.md\n coast docs --path concepts_and_terminology/ASSIGN.md\n coast docs --path concepts_and_terminology/PORTS.md\n coast docs --path coastfiles/README.md\n\n## Quick Start\n\nRoute requests into one of these modes:\n\n1. **Use Coast** — run `coast lookup`, then use `coast exec`, `coast ps`,\n or `coast logs` with the matching instance.\n2. **Create or Assign** — run `coast ls`, then `coast run` to create a new\n Coast or `coast assign` to repoint an existing one.\n3. **Open UI** — run `coast ui`.\n\n## What Runs Where\n\nThe host and the Coast share the filesystem. Only use `coast exec` for things\nthat need running services inside the container.\n\n**Use `coast exec` for:**\n- Integration tests, API tests, anything that needs databases or services\n- Service restarts, compose operations\n- Commands that talk to container-only processes\n\n**Run on the host:**\n- Linting (`eslint`, `rubocop`, `golangci-lint`)\n- Typechecking (`tsc --noEmit`, `go vet`)\n- Formatting (`prettier`, `gofmt`)\n- Git operations\n- Playwright and browser tests\n- Static analysis, code generation\n- Package installs (`npm install`, `pip install`)\n\n## Create and Assign\n\nWhen `coast lookup` returns no match:\n\n1. Run `coast ls` to see available slots.\n2. Prefer `coast run -w ` to create and assign in one step.\n3. If no build exists yet, run `coast build` first.\n4. 
After creating, rerun `coast lookup` to confirm.\n\nWhen you want to switch an existing Coast to a different worktree:\n\n coast assign -w \n\nThat also works for an already assigned or checked-out Coast, but ask the user\nfirst before reassigning an occupied slot.\n\n## Coastfile Setup\n\nIf the project needs a new or modified Coastfile, read the docs first:\n\n coast docs --path coastfiles/README.md\n\nThe Coastfile docs cover compose setup, ports, volumes, secrets, shared\nservices, bare services, and inheritance.\n\n## Safety Rules\n\n- Run `coast lookup` before taking action and again after any topology change.\n- Ask before `coast assign`, `coast unassign`, or `coast checkout` if it would\n disrupt an existing slot.\n- Prefer creating a new Coast over reusing a checked-out or already-assigned\n one unless the user explicitly wants the existing slot to be reassigned.\n- Use `coast docs` or `coast search-docs` before guessing.\n", "coastfiles/README.md": "# Coastfiles\n\nA Coastfile is a TOML configuration file that lives at the root of your project. It tells Coast everything it needs to know to build and run isolated development environments for that project — which services to run, which ports to forward, how to handle data, and how to manage secrets.\n\nEvery Coast project needs at least one Coastfile. The file is always named `Coastfile` (capital C, no extension). If you need variants for different workflows, you create typed Coastfiles like `Coastfile.light` or `Coastfile.snap` that [inherit from the base](INHERITANCE.md).\n\nFor a deeper understanding of how Coastfiles relate to the rest of Coast, see [Coasts](../concepts_and_terminology/COASTS.md) and [Builds](../concepts_and_terminology/BUILDS.md).\n\n## Quickstart\n\nThe smallest possible Coastfile:\n\n```toml\n[coast]\nname = \"my-app\"\n```\n\nThis gives you a DinD container you can `coast exec` into. 
Most projects will want either a `compose` reference or [bare services](SERVICES.md):\n\n```toml\n[coast]\nname = \"my-app\"\ncompose = \"./docker-compose.yml\"\n\n[ports]\nweb = 3000\napi = 8080\n```\n\nOr without compose, using bare services:\n\n```toml\n[coast]\nname = \"my-app\"\n\n[coast.setup]\npackages = [\"nodejs\", \"npm\"]\n\n[services.web]\ninstall = \"npm install\"\ncommand = \"npx next dev --port 3000 --hostname 0.0.0.0\"\nport = 3000\nrestart = \"on-failure\"\n\n[ports]\nweb = 3000\n```\n\nRun `coast build` then `coast run dev-1` and you have an isolated environment.\n\n## Example Coastfiles\n\n### Simple bare-service project\n\nA Next.js app with no compose file. Coast installs Node, runs `npm install`, and starts the dev server directly.\n\n```toml\n[coast]\nname = \"my-crm\"\nruntime = \"dind\"\n\n[coast.setup]\npackages = [\"nodejs\", \"npm\"]\n\n[services.web]\ninstall = \"npm install\"\ncommand = \"npx next dev --turbopack --port 3002 --hostname 0.0.0.0\"\nport = 3002\nrestart = \"on-failure\"\n\n[ports]\nweb = 3002\n```\n\n### Full-stack compose project\n\nA multi-service project with shared databases, secrets, volume strategies, and custom setup.\n\n```toml\n[coast]\nname = \"my-app\"\ncompose = \"./infra/docker-compose.yml\"\nworktree_dir = [\".worktrees\", \"~/.codex/worktrees\"]\nprimary_port = \"web\"\n\n[coast.setup]\npackages = [\"nodejs\", \"npm\", \"python3\", \"curl\", \"git\", \"bash\", \"ca-certificates\", \"wget\"]\nrun = [\n \"ARCH=$(uname -m | sed 's/aarch64/arm64/' | sed 's/x86_64/amd64/') && wget -qO /tmp/go.tar.gz https://go.dev/dl/go1.24.1.linux-${ARCH}.tar.gz && tar -C /usr/local -xzf /tmp/go.tar.gz && rm /tmp/go.tar.gz\",\n \"GOBIN=/usr/local/bin go install github.com/air-verse/air@v1.61.7\",\n]\n\n[ports]\nweb = 3000\nbackend = 8080\npostgres = 5432\nredis = 6379\n\n[shared_services.postgres]\nimage = \"postgres:15\"\nports = [5432]\nvolumes = [\"infra_postgres_data:/var/lib/postgresql/data\"]\nenv = { POSTGRES_USER = 
\"myapp\", POSTGRES_PASSWORD = \"myapp_pass\" }\n\n[shared_services.redis]\nimage = \"redis:7\"\nports = [6379]\n\n[volumes.go_modules_cache]\nstrategy = \"shared\"\nservice = \"backend\"\nmount = \"/go/pkg/mod\"\n\n[secrets.db_password]\nextractor = \"env\"\nvar = \"DB_PASSWORD\"\ninject = \"env:DB_PASSWORD\"\n\n[omit]\nservices = [\"monitoring\", \"admin-panel\", \"nginx-proxy\"]\n\n[assign]\ndefault = \"none\"\n[assign.services]\nbackend = \"hot\"\nweb = \"hot\"\n```\n\n### Lightweight test variant (inheritance)\n\nExtends the base Coastfile but strips it down to only what's needed for running backend tests. No ports, no shared services, isolated databases.\n\n```toml\n[coast]\nextends = \"Coastfile\"\nautostart = false\n\n[unset]\nports = [\"web\", \"backend\", \"postgres\", \"redis\"]\nshared_services = [\"postgres\", \"redis\"]\n\n[omit]\nservices = [\"redis\", \"backend\", \"web\"]\n\n[volumes.postgres_data]\nstrategy = \"isolated\"\nservice = \"postgres\"\nmount = \"/var/lib/postgresql/data\"\n\n[assign]\ndefault = \"none\"\n[assign.services]\nbackend-test = \"rebuild\"\n```\n\n### Snapshot-seeded variant\n\nEach coast instance starts with a copy of the host's existing database volumes, then diverges independently.\n\n```toml\n[coast]\nextends = \"Coastfile\"\n\n[unset]\nshared_services = [\"postgres\", \"redis\", \"mongodb\"]\n\n[volumes.postgres_data]\nstrategy = \"isolated\"\nsnapshot_source = \"infra_postgres_data\"\nservice = \"postgres\"\nmount = \"/var/lib/postgresql/data\"\n\n[volumes.redis_data]\nstrategy = \"isolated\"\nsnapshot_source = \"infra_redis_data\"\nservice = \"redis\"\nmount = \"/data\"\n\n[volumes.mongodb_data]\nstrategy = \"isolated\"\nsnapshot_source = \"infra_mongodb_data\"\nservice = \"mongodb\"\nmount = \"/data/db\"\n```\n\n## Conventions\n\n- The file must be named `Coastfile` (capital C, no extension) and live at the project root.\n- Typed variants use the pattern `Coastfile.{type}` — for example `Coastfile.light`, 
`Coastfile.snap`. See [Inheritance and Types](INHERITANCE.md).\n- The reserved name `Coastfile.default` is not allowed.\n- TOML syntax is used throughout. All section headers use `[brackets]` and named entries use `[section.name]` (not array-of-tables).\n- You cannot use both `compose` and `[services]` in the same Coastfile — pick one.\n- Relative paths (for `compose`, `root`, etc.) are resolved against the Coastfile's parent directory.\n\n## Reference\n\n| Page | Sections | What it covers |\n|------|----------|----------------|\n| [Project and Setup](PROJECT.md) | `[coast]`, `[coast.setup]` | Name, compose path, runtime, worktree dir, container setup |\n| [Worktree Directories](WORKTREE_DIR.md) | `worktree_dir`, `default_worktree_dir` | Local and external worktree dirs, tilde paths, Codex/Claude integration |\n| [Ports](PORTS.md) | `[ports]`, `[egress]` | Port forwarding, egress declarations, primary port |\n| [Volumes](VOLUMES.md) | `[volumes.*]` | Isolated, shared, and snapshot-seeded volume strategies |\n| [Shared Services](SHARED_SERVICES.md) | `[shared_services.*]` | Host-level databases and infrastructure services |\n| [Secrets](SECRETS.md) | `[secrets.*]`, `[inject]` | Secret extraction, injection, and host env/file forwarding |\n| [Bare Services](SERVICES.md) | `[services.*]` | Running processes directly without Docker Compose |\n| [Agent Shell](AGENT_SHELL.md) | `[agent_shell]` | Containerized agent TUI runtimes |\n| [MCP Servers](MCP.md) | `[mcp.*]`, `[mcp_clients.*]` | Internal and host-proxied MCP servers, client connectors |\n| [Assign](ASSIGN.md) | `[assign]` | Branch-switch behavior per service |\n| [Inheritance and Types](INHERITANCE.md) | `extends`, `includes`, `[unset]`, `[omit]` | Typed Coastfiles, composition, and overrides |\n", "coastfiles/AGENT_SHELL.md": "# Agent Shell\n\n> **In most workflows, you do not need to containerize your coding agent.** Because Coasts share the [filesystem](../concepts_and_terminology/FILESYSTEM.md) with your host 
machine, the simplest approach is to run the agent on your host and use [`coast exec`](../concepts_and_terminology/EXEC_AND_DOCKER.md) for runtime-heavy tasks like integration tests. Agent shells are for cases where you specifically want the agent running inside the container — for example, to give it direct access to the inner Docker daemon or to fully isolate its environment.\n\nThe `[agent_shell]` section configures an agent TUI — such as Claude Code or Codex — to run inside the Coast container. When present, Coast automatically spawns a persistent PTY session running the configured command when an instance starts.\n\nFor the full picture of how agent shells work — the active agent model, sending input, lifecycle and recovery — see [Agent Shells](../concepts_and_terminology/AGENT_SHELLS.md).\n\n## Configuration\n\nThe section has a single required field: `command`.\n\n```toml\n[agent_shell]\ncommand = \"claude --dangerously-skip-permissions\"\n```\n\n### `command` (required)\n\nThe shell command to run in the agent PTY. This is typically a coding agent CLI that you've installed via `[coast.setup]`.\n\nThe command runs inside the DinD container at `/workspace` (the project root). 
It is not a compose service — it runs alongside your compose stack or bare services, not inside them.\n\n## Lifecycle\n\n- The agent shell spawns automatically on `coast run`.\n- In [Coastguard](../concepts_and_terminology/COASTGUARD.md), it appears as a persistent \"Agent\" tab that cannot be closed.\n- If the agent process exits, Coast can respawn it.\n- You can send input to a running agent shell via `coast agent-shell input`.\n\n## Examples\n\n### Claude Code\n\nInstall Claude Code in `[coast.setup]`, configure credentials via [secrets](SECRETS.md), then set up the agent shell:\n\n```toml\n[coast]\nname = \"my-app\"\ncompose = \"./docker-compose.yml\"\n\n[coast.setup]\npackages = [\"nodejs\", \"npm\", \"git\", \"bash\"]\nrun = [\n \"npm install -g @anthropic-ai/claude-code\",\n \"mkdir -p /root/.claude\",\n]\n\n[secrets.claude_credentials]\nextractor = \"keychain\"\nservice = \"Claude Code-credentials\"\ninject = \"file:/root/.claude/.credentials.json\"\n\n[agent_shell]\ncommand = \"cd /workspace; exec claude --dangerously-skip-permissions --effort high\"\n```\n\n### Simple agent shell\n\nA minimal agent shell for testing that the feature works:\n\n```toml\n[coast]\nname = \"test-agent\"\n\n[coast.setup]\npackages = [\"bash\"]\n\n[agent_shell]\ncommand = \"exec sh -c 'while true; do echo agent-heartbeat; sleep 5; done'\"\n```\n", "coastfiles/ASSIGN.md": "# Assign\n\nThe `[assign]` section controls what happens to services inside a Coast instance when you switch branches with `coast assign`. Each service can be configured with a different strategy depending on whether it needs a full rebuild, a restart, a hot-reload, or nothing at all.\n\nFor how `coast assign` and `coast unassign` work at runtime, see [Assign](../concepts_and_terminology/ASSIGN.md).\n\n## `[assign]`\n\n### `default`\n\nThe default action applied to all services on branch switch. Defaults to `\"restart\"` if the entire `[assign]` section is omitted.\n\n- **`\"none\"`** — do nothing. 
The service keeps running as-is. Good for databases and caches that don't depend on code.\n- **`\"hot\"`** — the code is already live-mounted via the [filesystem](../concepts_and_terminology/FILESYSTEM.md), so the service picks up changes automatically (e.g. via a file watcher or hot-reload). No container restart needed.\n- **`\"restart\"`** — restart the service container. Use when the service reads code at startup but doesn't need a full image rebuild.\n- **`\"rebuild\"`** — rebuild the service's Docker image and restart. Required when code is baked into the image via `COPY` or `ADD` in the Dockerfile.\n\n```toml\n[assign]\ndefault = \"none\"\n```\n\n### `[assign.services]`\n\nPer-service overrides. Each key is a compose service name, and the value is one of the four actions above.\n\n```toml\n[assign]\ndefault = \"none\"\n\n[assign.services]\nbackend = \"hot\"\nweb = \"hot\"\n```\n\n```toml\n[assign]\ndefault = \"none\"\n\n[assign.services]\napp = \"rebuild\"\n```\n\nThis lets you leave databases and caches untouched (`\"none\"` via the default) while rebuilding or restarting only the services that depend on the code that changed.\n\n### `[assign.rebuild_triggers]`\n\nFile patterns that force a rebuild for specific services, even if their default action is something lighter. Each key is a service name, and the value is a list of file paths or patterns.\n\n```toml\n[assign]\ndefault = \"restart\"\n\n[assign.rebuild_triggers]\napi = [\"Dockerfile\", \"package.json\", \"package-lock.json\"]\n```\n\n### `exclude_paths`\n\nA list of paths to exclude from worktree sync during `coast assign`. 
Useful in large monorepos where certain directories are irrelevant to the services running in the Coast and would otherwise slow down the assign operation.\n\n```toml\n[assign]\ndefault = \"none\"\nexclude_paths = [\"apps/ide\", \"apps/extension\", \"apps/ide-extension\"]\n\n[assign.services]\nbackend = \"hot\"\nweb = \"hot\"\n```\n\n## Examples\n\n### Rebuild app, leave everything else alone\n\nWhen your app service bakes code into its Docker image but your databases are independent of code changes:\n\n```toml\n[assign]\ndefault = \"none\"\n\n[assign.services]\napp = \"rebuild\"\n```\n\n### Hot-reload frontend and backend\n\nWhen both services use file watchers (e.g. Next.js dev server, Go air, nodemon) and code is live-mounted:\n\n```toml\n[assign]\ndefault = \"none\"\n\n[assign.services]\nbackend = \"hot\"\nweb = \"hot\"\n```\n\n### Per-service rebuild with triggers\n\nThe API service normally just restarts, but if `Dockerfile` or `package.json` changed, it rebuilds:\n\n```toml\n[assign]\ndefault = \"none\"\n\n[assign.services]\napi = \"restart\"\nworker = \"restart\"\n\n[assign.rebuild_triggers]\napi = [\"Dockerfile\", \"package.json\"]\n```\n\n### Full rebuild for everything\n\nWhen all services bake code into their images:\n\n```toml\n[assign]\ndefault = \"rebuild\"\n```\n", @@ -356,7 +361,7 @@ "coastfiles/SERVICES.md": "# Bare Services\n\n> **Note:** Bare services run directly inside the Coast container as plain processes — they are not containerized. If your services are already Dockerized, use `compose` instead. Bare services are best suited for simple setups where you want to skip the overhead of writing a Dockerfile and docker-compose.yml.\n\nThe `[services.*]` sections define processes that Coast runs directly inside the DinD container, without Docker Compose. This is an alternative to using a `compose` file — you cannot use both in the same Coastfile.\n\nBare services are supervised by Coast with log capture and optional restart policies. 
For deeper background on how bare services work, their limitations, and when to migrate to compose, see [Bare Services](../concepts_and_terminology/BARE_SERVICES.md).\n\n## Defining a service\n\nEach service is a named TOML section under `[services]`. The `command` field is required.\n\n```toml\n[services.web]\ncommand = \"node server.js\"\nport = 3000\n```\n\n### `command` (required)\n\nThe shell command to run. Must not be empty or whitespace-only.\n\n```toml\n[services.web]\ncommand = \"npx next dev --turbopack --port 3000 --hostname 0.0.0.0\"\n```\n\n### `port`\n\nThe port the service listens on. Used for health checking and port forwarding integration. Must be non-zero if specified.\n\n```toml\n[services.web]\ncommand = \"npx next dev --port 3000 --hostname 0.0.0.0\"\nport = 3000\n```\n\n### `restart`\n\nRestart policy if the process exits. Defaults to `\"no\"`.\n\n- `\"no\"` — do not restart\n- `\"on-failure\"` — restart only if the process exits with a non-zero code\n- `\"always\"` — always restart\n\n```toml\n[services.web]\ncommand = \"node server.js\"\nport = 3000\nrestart = \"on-failure\"\n```\n\n### `install`\n\nCommands to run before starting the service (e.g. installing dependencies). Accepts either a single string or an array of strings.\n\n```toml\n[services.web]\ninstall = \"npm install\"\ncommand = \"npx next dev --port 3000 --hostname 0.0.0.0\"\nport = 3000\n```\n\n```toml\n[services.web]\ninstall = [\"npm install\", \"npm run build\"]\ncommand = \"npm start\"\nport = 3000\n```\n\n## Mutual exclusion with compose\n\nA Coastfile cannot define both `compose` and `[services]`. If you have a `compose` field in `[coast]`, adding any `[services.*]` section is an error. 
Choose one approach per Coastfile.\n\nIf you need some services containerized via compose and some running bare, use compose for all of them — see [the migration guidance in Bare Services](../concepts_and_terminology/BARE_SERVICES.md) for how to move from bare services to compose.\n\n## Examples\n\n### Single-service Next.js app\n\n```toml\n[coast]\nname = \"my-frontend\"\n\n[coast.setup]\npackages = [\"nodejs\", \"npm\"]\n\n[services.web]\ninstall = \"npm install\"\ncommand = \"npx next dev --turbopack --port 3002 --hostname 0.0.0.0\"\nport = 3002\nrestart = \"on-failure\"\n\n[ports]\nweb = 3002\n```\n\n### Web server with background worker\n\n```toml\n[coast]\nname = \"my-app\"\n\n[coast.setup]\npackages = [\"nodejs\", \"npm\"]\n\n[services.web]\ninstall = \"npm install\"\ncommand = \"node server.js\"\nport = 3000\nrestart = \"on-failure\"\n\n[services.worker]\ncommand = \"node worker.js\"\nrestart = \"always\"\n\n[ports]\nweb = 3000\n```\n\n### Python service with multi-step install\n\n```toml\n[coast]\nname = \"ml-service\"\n\n[coast.setup]\npackages = [\"python3\", \"py3-pip\"]\n\n[services.api]\ninstall = [\"pip install -r requirements.txt\", \"python manage.py migrate\"]\ncommand = \"python manage.py runserver 0.0.0.0:8000\"\nport = 8000\nrestart = \"on-failure\"\n\n[ports]\napi = 8000\n```\n", "coastfiles/SHARED_SERVICES.md": "# Shared Services\n\nThe `[shared_services.*]` sections define infrastructure services — databases, caches, message brokers — that run on the host Docker daemon rather than inside individual Coast containers. Multiple Coast instances connect to the same shared service over a bridge network.\n\nFor how shared services work at runtime, lifecycle management, and troubleshooting, see [Shared Services](../concepts_and_terminology/SHARED_SERVICES.md).\n\n## Defining a shared service\n\nEach shared service is a named TOML section under `[shared_services]`. 
The `image` field is required; everything else is optional.\n\n```toml\n[shared_services.postgres]\nimage = \"postgres:16\"\nports = [5432]\nenv = { POSTGRES_PASSWORD = \"dev\" }\n```\n\n### `image` (required)\n\nThe Docker image to run on the host daemon.\n\n### `ports`\n\nList of ports the service exposes. Coast accepts either bare container ports or\nDocker Compose-style `\"HOST:CONTAINER\"` mappings.\n\n```toml\n[shared_services.redis]\nimage = \"redis:7-alpine\"\nports = [6379]\n```\n\n```toml\n[shared_services.postgis]\nimage = \"ghcr.io/baosystems/postgis:12-3.3\"\nports = [\"5433:5432\"]\n```\n\n- A bare integer like `6379` is shorthand for `\"6379:6379\"`.\n- A mapped string like `\"5433:5432\"` publishes the shared service on host port\n `5433` while keeping it reachable inside Coasts at `service-name:5432`.\n- Host and container ports must both be non-zero.\n\n### `volumes`\n\nDocker volume bind strings for persisting data. These are host-level Docker volumes, not Coast-managed volumes.\n\n```toml\n[shared_services.postgres]\nimage = \"postgres:15\"\nports = [5432]\nvolumes = [\"infra_postgres_data:/var/lib/postgresql/data\"]\n```\n\n### `env`\n\nEnvironment variables passed to the service container.\n\n```toml\n[shared_services.postgres]\nimage = \"postgres:15\"\nports = [5432]\nvolumes = [\"infra_postgres_data:/var/lib/postgresql/data\"]\nenv = { POSTGRES_USER = \"myapp\", POSTGRES_PASSWORD = \"myapp_pass\", POSTGRES_DB = \"mydb\" }\n```\n\n### `auto_create_db`\n\nWhen `true`, Coast automatically creates a per-instance database inside the shared service for each Coast instance. Defaults to `false`.\n\n```toml\n[shared_services.postgres]\nimage = \"postgres:16\"\nports = [5432]\nenv = { POSTGRES_PASSWORD = \"dev\" }\nauto_create_db = true\n```\n\n### `inject`\n\nInjects the shared service connection info into Coast instances as an environment variable or file. 
Uses the same `env:NAME` or `file:/path` format as [secrets](SECRETS.md).\n\n```toml\n[shared_services.postgres]\nimage = \"postgres:16\"\nports = [5432]\nenv = { POSTGRES_PASSWORD = \"dev\" }\ninject = \"env:DATABASE_URL\"\n```\n\n## Lifecycle\n\nShared services start automatically when the first Coast instance that references them runs. They keep running across `coast stop` and `coast rm` — removing an instance does not affect shared service data. Only `coast shared rm` stops and removes a shared service.\n\nPer-instance databases created by `auto_create_db` also survive instance deletion. Use `coast shared rm` to remove the service and its data entirely.\n\n## When to use shared services vs volumes\n\nUse shared services when multiple Coast instances need to talk to the same database server (e.g. a shared Postgres where each instance gets its own database). Use [volume strategies](VOLUMES.md) when you want to control how a compose-internal service's data is shared or isolated.\n\n## Examples\n\n### Postgres, Redis, and MongoDB\n\n```toml\n[shared_services.postgres]\nimage = \"postgres:15\"\nports = [5432]\nvolumes = [\"infra_postgres_data:/var/lib/postgresql/data\"]\nenv = { POSTGRES_USER = \"myapp\", POSTGRES_PASSWORD = \"myapp_pass\", POSTGRES_MULTIPLE_DATABASES = \"dev_db,test_db\" }\n\n[shared_services.redis]\nimage = \"redis:7\"\nports = [6379]\nvolumes = [\"infra_redis_data:/data\"]\n\n[shared_services.mongodb]\nimage = \"mongo:latest\"\nports = [27017]\nvolumes = [\"infra_mongodb_data:/data/db\"]\nenv = { MONGO_INITDB_ROOT_USERNAME = \"myapp\", MONGO_INITDB_ROOT_PASSWORD = \"myapp_pass\" }\n```\n\n### Minimal shared Postgres\n\n```toml\n[shared_services.postgres]\nimage = \"postgres:16-alpine\"\nports = [5432]\nenv = { POSTGRES_USER = \"coast\", POSTGRES_PASSWORD = \"coast\", POSTGRES_DB = \"coast_demo\" }\n```\n\n### Host/container mapped shared Postgres\n\n```toml\n[shared_services.postgres]\nimage = \"postgres:16-alpine\"\nports = 
[\"5433:5432\"]\nenv = { POSTGRES_USER = \"coast\", POSTGRES_PASSWORD = \"coast\", POSTGRES_DB = \"coast_demo\" }\n```\n\n### Shared services with auto-created databases\n\n```toml\n[shared_services.db]\nimage = \"postgres:16-alpine\"\nports = [5432]\nenv = { POSTGRES_USER = \"coast\", POSTGRES_PASSWORD = \"coast\" }\nauto_create_db = true\n```\n", "coastfiles/VOLUMES.md": "# Volumes\n\nThe `[volumes.*]` sections control how named Docker volumes are handled across Coast instances. Each volume is configured with a strategy that determines whether instances share data or get their own independent copy.\n\nFor the broader picture of data isolation in Coast — including shared services as an alternative — see [Volumes](../concepts_and_terminology/VOLUMES.md).\n\n## Defining a volume\n\nEach volume is a named TOML section under `[volumes]`. Three fields are required:\n\n- **`strategy`** — `\"isolated\"` or `\"shared\"`\n- **`service`** — the compose service name that uses this volume\n- **`mount`** — the container mount path for the volume\n\n```toml\n[volumes.postgres_data]\nstrategy = \"isolated\"\nservice = \"db\"\nmount = \"/var/lib/postgresql/data\"\n```\n\n## Strategies\n\n### `isolated`\n\nEach Coast instance gets its own independent volume. Data is not shared between instances. Volumes are created on `coast run` and deleted on `coast rm`.\n\n```toml\n[volumes.redis_data]\nstrategy = \"isolated\"\nservice = \"cache\"\nmount = \"/data\"\n```\n\nThis is the right choice for most database volumes — each instance gets a clean slate and can mutate data freely without affecting other instances.\n\n### `shared`\n\nAll Coast instances use a single Docker volume. Any data written by one instance is visible to all others.\n\n```toml\n[volumes.go_modules_cache]\nstrategy = \"shared\"\nservice = \"backend\"\nmount = \"/go/pkg/mod\"\n```\n\nShared volumes are never deleted by `coast rm`. 
They persist until you remove them manually.\n\nCoast prints a warning at build time if you use `shared` on a volume attached to a database-like service. Sharing a single database volume across multiple concurrent instances can cause corruption. If you need shared databases, use [shared services](SHARED_SERVICES.md) instead.\n\nGood uses for shared volumes: dependency caches (Go modules, npm cache, pip cache), build artifact caches, and other data where concurrent writes are safe or unlikely.\n\n## Snapshot seeding\n\nIsolated volumes can be seeded from an existing Docker volume at instance creation time using `snapshot_source`. The source volume's data is copied into the new isolated volume, which then diverges independently.\n\n```toml\n[volumes.postgres_data]\nstrategy = \"isolated\"\nsnapshot_source = \"infra_postgres_data\"\nservice = \"db\"\nmount = \"/var/lib/postgresql/data\"\n```\n\n`snapshot_source` is only valid with `strategy = \"isolated\"`. Setting it on a shared volume is an error.\n\nThis is useful when you want each Coast instance to start with a realistic dataset copied from your host development database, but you want instances to be free to mutate that data without affecting the source or each other.\n\n## Examples\n\n### Isolated databases, shared dependency cache\n\n```toml\n[volumes.postgres_data]\nstrategy = \"isolated\"\nservice = \"db\"\nmount = \"/var/lib/postgresql/data\"\n\n[volumes.redis_data]\nstrategy = \"isolated\"\nservice = \"cache\"\nmount = \"/data\"\n\n[volumes.go_modules_cache]\nstrategy = \"shared\"\nservice = \"backend\"\nmount = \"/go/pkg/mod\"\n```\n\n### Snapshot-seeded full stack\n\nEach instance starts with a copy of your host's existing database volumes, then diverges independently.\n\n```toml\n[volumes.postgres_data]\nstrategy = \"isolated\"\nsnapshot_source = \"infra_postgres_data\"\nservice = \"postgres\"\nmount = \"/var/lib/postgresql/data\"\n\n[volumes.redis_data]\nstrategy = \"isolated\"\nsnapshot_source = 
\"infra_redis_data\"\nservice = \"redis\"\nmount = \"/data\"\n\n[volumes.mongodb_data]\nstrategy = \"isolated\"\nsnapshot_source = \"infra_mongodb_data\"\nservice = \"mongodb\"\nmount = \"/data/db\"\n```\n\n### Test runner with clean databases per instance\n\n```toml\n[volumes.postgres_data]\nstrategy = \"isolated\"\nservice = \"postgres\"\nmount = \"/var/lib/postgresql/data\"\n\n[volumes.redis_data]\nstrategy = \"isolated\"\nservice = \"test-redis\"\nmount = \"/data\"\n\n[volumes.mongodb_data]\nstrategy = \"isolated\"\nservice = \"mongodb\"\nmount = \"/data/db\"\n```\n", - "coastfiles/WORKTREE_DIR.md": "# Worktree Directories\n\nThe `worktree_dir` field in `[coast]` controls where git worktrees live. Coast uses git worktrees to give each instance its own copy of the codebase on a different branch, without duplicating the full repo.\n\n## Syntax\n\n`worktree_dir` accepts a single string or an array of strings:\n\n```toml\n# Single directory (default)\nworktree_dir = \".worktrees\"\n\n# Multiple directories\nworktree_dir = [\".worktrees\", \".claude/worktrees\", \"~/.codex/worktrees\"]\n```\n\nWhen omitted, defaults to `\".worktrees\"`.\n\n## Path types\n\n### Relative paths\n\nPaths that don't start with `~/` or `/` are resolved relative to the project root. These are the most common and require no special handling — they're inside the project directory and automatically available inside the Coast container via the standard `/host-project` bind mount.\n\n```toml\nworktree_dir = \".worktrees\"\nworktree_dir = [\".worktrees\", \".claude/worktrees\"]\n```\n\n### Tilde paths (external)\n\nPaths starting with `~/` are expanded to the user's home directory and treated as **external** worktree directories. 
Coast adds a separate bind mount so the container can access them.\n\n```toml\nworktree_dir = [\"~/.codex/worktrees\", \".worktrees\"]\n```\n\nThis is how you integrate with tools that create worktrees outside your project root, such as OpenAI Codex (which always creates worktrees at `$CODEX_HOME/worktrees`).\n\n### Absolute paths (external)\n\nPaths starting with `/` are also treated as external and get their own bind mount.\n\n```toml\nworktree_dir = [\"/shared/worktrees\", \".worktrees\"]\n```\n\n## How external directories work\n\nWhen Coast encounters an external worktree directory (tilde or absolute path), three things happen:\n\n1. **Container bind mount** — At container creation time (`coast run`), the resolved host path is bind-mounted into the container at `/host-external-wt/{index}`, where `{index}` is the position in the `worktree_dir` array. This makes the external files accessible inside the container.\n\n2. **Project filtering** — External directories may contain worktrees for multiple projects. Coast uses `git worktree list --porcelain` (which is inherently scoped to the current repository) to discover only the worktrees that belong to this project. The git watcher also verifies ownership by reading each worktree's `.git` file and checking that its `gitdir:` pointer resolves back to the current repo.\n\n3. **Workspace remount** — When you `coast assign` to an external worktree, Coast remounts `/workspace` from the external bind mount path instead of the usual `/host-project/{dir}/{name}`.\n\n## Naming of external worktrees\n\nExternal worktrees with a branch checked out appear by their branch name, the same as local worktrees.\n\nExternal worktrees on a **detached HEAD** (common with Codex) appear using their relative path within the external directory. 
For example, a Codex worktree at `~/.codex/worktrees/a0db/coastguard-platform` appears as `a0db/coastguard-platform` in the UI and CLI.\n\n## `default_worktree_dir`\n\nControls which directory is used when Coast creates a **new** worktree (e.g., when you assign a branch that doesn't have an existing worktree). Defaults to the first entry in `worktree_dir`.\n\n```toml\n[coast]\nname = \"my-app\"\nworktree_dir = [\".worktrees\", \"~/.codex/worktrees\"]\ndefault_worktree_dir = \".worktrees\"\n```\n\nExternal directories are never used for creating new worktrees — Coast always creates worktrees in a local (relative) directory. The `default_worktree_dir` field is only needed when you want to override the default (first entry).\n\n## Examples\n\n### Codex integration\n\nOpenAI Codex creates worktrees at `~/.codex/worktrees/{hash}/{project-name}`. To make these visible and assignable in Coast:\n\n```toml\n[coast]\nname = \"my-app\"\nworktree_dir = [\".worktrees\", \"~/.codex/worktrees\"]\n```\n\nAfter adding this, Codex's worktrees show up in the checkout modal and `coast ls` output. You can assign a Coast instance to a Codex worktree to run its code in a full development environment.\n\nNote: the container must be recreated (`coast run`) after adding an external directory for the bind mount to take effect. Restarting an existing instance is not sufficient.\n\n### Claude Code integration\n\nClaude Code creates worktrees inside the project at `.claude/worktrees/`. 
Since this is a relative path (inside the project root), it works like any other local worktree directory — no external mount needed:\n\n```toml\n[coast]\nname = \"my-app\"\nworktree_dir = [\".worktrees\", \".claude/worktrees\"]\n```\n\n### All three together\n\n```toml\n[coast]\nname = \"my-app\"\nworktree_dir = [\".worktrees\", \".claude/worktrees\", \"~/.codex/worktrees\"]\n```\n\n## Live Coastfile reading\n\nChanges to `worktree_dir` in your Coastfile take effect immediately for worktree **listing** (the API and git watcher read the live Coastfile from disk, not just the cached build artifact). However, external **bind mounts** are only created at container creation time, so you need to recreate the instance for a newly added external directory to be mountable.\n", + "coastfiles/WORKTREE_DIR.md": "# Worktree Directories\n\nThe `worktree_dir` field in `[coast]` controls where git worktrees live. Coast uses git worktrees to give each instance its own copy of the codebase on a different branch, without duplicating the full repo.\n\n## Syntax\n\n`worktree_dir` accepts a single string or an array of strings:\n\n```toml\n# Single directory (default)\nworktree_dir = \".worktrees\"\n\n# Multiple directories\nworktree_dir = [\".worktrees\", \".claude/worktrees\", \"~/.codex/worktrees\"]\n```\n\nWhen omitted, defaults to `\".worktrees\"`.\n\n## Path types\n\n### Relative paths\n\nPaths that don't start with `~/` or `/` are resolved relative to the project root. These are the most common and require no special handling — they're inside the project directory and automatically available inside the Coast container via the standard `/host-project` bind mount.\n\n```toml\nworktree_dir = \".worktrees\"\nworktree_dir = [\".worktrees\", \".claude/worktrees\"]\n```\n\n### Tilde paths (external)\n\nPaths starting with `~/` are expanded to the user's home directory and treated as **external** worktree directories. 
Coast adds a separate bind mount so the container can access them.\n\n```toml\nworktree_dir = [\"~/.codex/worktrees\", \".worktrees\"]\n```\n\nThis is how you integrate with tools that create worktrees outside your project root, such as OpenAI Codex (which always creates worktrees at `$CODEX_HOME/worktrees`).\n\n### Absolute paths (external)\n\nPaths starting with `/` are also treated as external and get their own bind mount.\n\n```toml\nworktree_dir = [\"/shared/worktrees\", \".worktrees\"]\n```\n\n### Glob patterns (external)\n\nExternal paths can contain glob metacharacters (`*`, `?`, `[...]`). Coast expands them at runtime against the host filesystem, creating a bind mount for each matching directory.\n\n```toml\nworktree_dir = [\".worktrees\", \"~/.shep/repos/*/wt\"]\n```\n\nThis is useful when a tool generates worktrees under a path component that varies per project (like a hash). The `*` matches any single directory name, so `~/.shep/repos/*/wt` matches `~/.shep/repos/a21f0cda9ab9d456/wt` and any other hash directory that contains a `wt` subdirectory.\n\nSupported glob syntax:\n\n- `*` — matches any sequence of characters within a single path component\n- `?` — matches any single character\n- `[abc]` — matches any character in the set\n- `[!abc]` — matches any character not in the set\n\nGlob expansion happens everywhere worktree dirs are resolved: container creation, assign, start, lookup, and the git watcher. Matches are sorted for deterministic ordering. If a glob matches no directories, it is silently skipped.\n\nLike other external paths, the container must be recreated (`coast run`) after adding a glob pattern for the bind mount to take effect.\n\n## How external directories work\n\nWhen Coast encounters an external worktree directory (tilde or absolute path), three things happen:\n\n1. 
**Container bind mount** — At container creation time (`coast run`), the resolved host path is bind-mounted into the container at `/host-external-wt/{index}`, where `{index}` is the position in the `worktree_dir` array. This makes the external files accessible inside the container.\n\n2. **Project filtering** — External directories may contain worktrees for multiple projects. Coast uses `git worktree list --porcelain` (which is inherently scoped to the current repository) to discover only the worktrees that belong to this project. The git watcher also verifies ownership by reading each worktree's `.git` file and checking that its `gitdir:` pointer resolves back to the current repo.\n\n3. **Workspace remount** — When you `coast assign` to an external worktree, Coast remounts `/workspace` from the external bind mount path instead of the usual `/host-project/{dir}/{name}`.\n\n## Naming of external worktrees\n\nExternal worktrees with a branch checked out appear by their branch name, the same as local worktrees.\n\nExternal worktrees on a **detached HEAD** (common with Codex) appear using their relative path within the external directory. For example, a Codex worktree at `~/.codex/worktrees/a0db/coastguard-platform` appears as `a0db/coastguard-platform` in the UI and CLI.\n\n## `default_worktree_dir`\n\nControls which directory is used when Coast creates a **new** worktree (e.g., when you assign a branch that doesn't have an existing worktree). Defaults to the first entry in `worktree_dir`.\n\n```toml\n[coast]\nname = \"my-app\"\nworktree_dir = [\".worktrees\", \"~/.codex/worktrees\"]\ndefault_worktree_dir = \".worktrees\"\n```\n\nExternal directories are never used for creating new worktrees — Coast always creates worktrees in a local (relative) directory. 
The `default_worktree_dir` field is only needed when you want to override the default (first entry).\n\n## Examples\n\n### Codex integration\n\nOpenAI Codex creates worktrees at `~/.codex/worktrees/{hash}/{project-name}`. To make these visible and assignable in Coast:\n\n```toml\n[coast]\nname = \"my-app\"\nworktree_dir = [\".worktrees\", \"~/.codex/worktrees\"]\n```\n\nAfter adding this, Codex's worktrees show up in the checkout modal and `coast ls` output. You can assign a Coast instance to a Codex worktree to run its code in a full development environment.\n\nNote: the container must be recreated (`coast run`) after adding an external directory for the bind mount to take effect. Restarting an existing instance is not sufficient.\n\n### Claude Code integration\n\nClaude Code creates worktrees inside the project at `.claude/worktrees/`. Since this is a relative path (inside the project root), it works like any other local worktree directory — no external mount needed:\n\n```toml\n[coast]\nname = \"my-app\"\nworktree_dir = [\".worktrees\", \".claude/worktrees\"]\n```\n\n### Shep integration\n\nShep creates worktrees at `~/.shep/repos/{hash}/wt/{branch-slug}` where the hash is per-repo. Use a glob pattern to match the hash directory:\n\n```toml\n[coast]\nname = \"my-app\"\nworktree_dir = [\".worktrees\", \"~/.shep/repos/*/wt\"]\n```\n\n### All harnesses together\n\n```toml\n[coast]\nname = \"my-app\"\nworktree_dir = [\".worktrees\", \".claude/worktrees\", \"~/.codex/worktrees\", \"~/.shep/repos/*/wt\"]\n```\n\n## Live Coastfile reading\n\nChanges to `worktree_dir` in your Coastfile take effect immediately for worktree **listing** (the API and git watcher read the live Coastfile from disk, not just the cached build artifact). 
However, external **bind mounts** are only created at container creation time, so you need to recreate the instance for a newly added external directory to be mountable.\n", "concepts_and_terminology/README.md": "# Concepts and Terminology\n\nThis section covers the core concepts and vocabulary used throughout Coasts. If you are new to Coasts, start here before diving into configuration or advanced usage.\n\n- [Coasts](COASTS.md) — self-contained runtimes of your project, each with its own ports, volumes, and worktree assignment.\n- [Run](RUN.md) — creating a new Coast instance from the latest build, optionally assigning a worktree.\n- [Remove](REMOVE.md) — tearing down a Coast instance and its isolated runtime state when you need a clean recreate or want to take Coasts down.\n- [Filesystem](FILESYSTEM.md) — the shared mount between host and Coast, host-side agents, and worktree switching.\n- [Coast Daemon](DAEMON.md) — the local `coastd` control plane that executes lifecycle operations.\n- [Coast CLI](CLI.md) — the terminal interface for commands, scripts, and agent workflows.\n- [Coastguard](COASTGUARD.md) — the web UI launched with `coast ui` for observability and control.\n- [Ports](PORTS.md) — canonical ports vs dynamic ports and how checkout swaps between them.\n- [Primary Port & DNS](PRIMARY_PORT_AND_DNS.md) — quick-links to your primary service, subdomain routing for cookie isolation, and URL templates.\n- [Assign and Unassign](ASSIGN.md) — switching a Coast between worktrees and the available assign strategies.\n- [Checkout](CHECKOUT.md) — mapping canonical ports to a Coast instance and when you need it.\n- [Lookup](LOOKUP.md) — discovering which Coast instances match the agent's current worktree.\n- [Volume Topology](VOLUMES.md) — shared services, shared volumes, isolated volumes, and snapshotting.\n- [Shared Services](SHARED_SERVICES.md) — host-managed infrastructure services and volume disambiguation.\n- [Secrets and Extractors](SECRETS.md) — extracting 
host secrets and injecting them into Coast containers.\n- [Builds](BUILDS.md) — the anatomy of a coast build, where artifacts live, auto-pruning, and typed builds.\n- [Coastfile Types](COASTFILE_TYPES.md) — composable Coastfile variants with extends, unset, omit, and autostart.\n- [Runtimes and Services](RUNTIMES_AND_SERVICES.md) — the DinD runtime, Docker-in-Docker architecture, and how services run inside a Coast.\n- [Bare Services](BARE_SERVICES.md) — running non-containerized processes inside a Coast and why you should containerize instead.\n- [Logs](LOGS.md) — reading service logs from inside a Coast, the MCP tradeoff, and the Coastguard log viewer.\n- [Exec & Docker](EXEC_AND_DOCKER.md) — running commands inside a Coast and talking to the inner Docker daemon.\n- [Agent Shells](AGENT_SHELLS.md) — containerized agent TUIs, the OAuth tradeoff, and why you should probably run agents on the host instead.\n- [MCP Servers](MCP_SERVERS.md) — configuring MCP tools inside a Coast for containerized agents, internal vs host-proxied servers.\n- [Troubleshooting](TROUBLESHOOTING.md) — doctor, daemon restart, project removal, and the factory-reset nuke option.\n", "concepts_and_terminology/AGENT_SHELLS.md": "# Agent Shells\n\nAgent shells are shells inside a Coast that open directly to an agent TUI runtime — Claude Code, Codex, or any CLI agent. You configure them with an `[agent_shell]` section in your Coastfile and Coast spawns the agent process inside the DinD container.\n\n**For most use cases, you should not do this.** Run your coding agents on the host machine instead. The shared [filesystem](FILESYSTEM.md) means a host-side agent can edit code normally while calling [`coast logs`](LOGS.md), [`coast exec`](EXEC_AND_DOCKER.md), and [`coast ps`](RUNTIMES_AND_SERVICES.md) for runtime information. 
Agent shells add credential mounting, OAuth complications, and lifecycle complexity that you do not need unless you have a specific reason to containerize the agent itself.\n\n## The OAuth Problem\n\nIf you are using Claude Code, Codex, or similar tools that authenticate via OAuth, the token was issued for your host machine. When that same token is used from inside a Linux container — different user agent, different environment — the provider may flag or revoke it. You will get intermittent authentication failures that are difficult to debug.\n\nFor containerized agents, API-key-based authentication is the safer choice. Set the key as a [secret](SECRETS.md) in your Coastfile and inject it into the container environment.\n\nIf API keys are not an option, you can mount OAuth credentials into the Coast (see the Configuration section below), but expect friction. On macOS, if you use the `keychain` secret extractor to pull OAuth tokens, every `coast build` will prompt for your macOS Keychain password. This makes the build process tedious, especially when rebuilding frequently. The Keychain prompt is a macOS security requirement and cannot be bypassed.\n\n## Configuration\n\nAdd an `[agent_shell]` section to your Coastfile with the command to run:\n\n```toml\n[agent_shell]\ncommand = \"claude --dangerously-skip-permissions\"\n```\n\nThe command is executed inside the DinD container at `/workspace`. Coast creates a `coast` user inside the container, copies credentials from `/root/.claude/` to `/home/coast/.claude/`, and runs the command as that user. 
If your agent needs credentials mounted into the container, use `[secrets]` with file injection (see [Secrets and Extractors](SECRETS.md)) and `[coast.setup]` to install the agent CLI:\n\n```toml\n[coast.setup]\nrun = [\"npm install -g @anthropic-ai/claude-code\"]\n\n[secrets.claude_credentials]\nextractor = \"keychain\"\nservice = \"Claude Code-credentials\"\ninject = \"file:/root/.claude/.credentials.json\"\n\n[agent_shell]\ncommand = \"claude --dangerously-skip-permissions\"\n```\n\nIf `[agent_shell]` is configured, Coast auto-spawns a shell when the instance starts. The configuration is inherited via `extends` and can be overridden per [Coastfile type](COASTFILE_TYPES.md).\n\n## The Active Agent Model\n\nEach Coast instance can have multiple agent shells, but only one is **active** at a time. The active shell is the default target for commands that do not specify a `--shell` ID.\n\n```bash\ncoast agent-shell dev-1 ls\n\n SHELL STATUS ACTIVE\n 1 running ★\n 2 running\n```\n\nSwitch the active shell:\n\n```bash\ncoast agent-shell dev-1 activate 2\n```\n\nYou cannot close the active shell — activate a different one first. This prevents accidentally killing the shell you are interacting with.\n\nIn Coastguard, agent shells appear as tabs in the Exec panel with active/inactive badges. Click a tab to view its terminal; use the dropdown menu to activate, spawn, or close shells.\n\n![Agent shell in Coastguard](../../assets/coastguard-agent-shell.png)\n*An agent shell running Claude Code inside a Coast instance, accessible from the Exec tab in Coastguard.*\n\n## Sending Input\n\nThe primary way to drive a containerized agent programmatically is `coast agent-shell input`:\n\n```bash\ncoast agent-shell dev-1 input \"fix the failing test in auth.test.ts\"\n```\n\nThis writes the text to the active agent's TUI and presses Enter. The agent receives it as if you typed it into the terminal.\n\nOptions:\n\n- `--no-send` — write the text without pressing Enter. 
Useful for building up partial input or navigating TUI menus.\n- `--shell ` — target a specific shell instead of the active one.\n- `--show-bytes` — print the exact bytes being sent, for debugging.\n\nUnder the hood, input is written directly to the PTY master file descriptor. The text and Enter keystroke are sent as two separate writes with a 25ms gap to avoid paste-mode artifacts that some TUI frameworks exhibit when receiving rapid input.\n\n## Other Commands\n\n```bash\ncoast agent-shell dev-1 spawn # create a new shell\ncoast agent-shell dev-1 spawn --activate # create and immediately activate\ncoast agent-shell dev-1 tty # attach interactive TTY to active shell\ncoast agent-shell dev-1 tty --shell 2 # attach to a specific shell\ncoast agent-shell dev-1 read-output # read full scrollback buffer\ncoast agent-shell dev-1 read-last-lines 50 # read last 50 lines of output\ncoast agent-shell dev-1 session-status # check if the shell process is alive\n```\n\n`tty` gives you a live interactive session — you can type directly into the agent's TUI. Detach with the standard terminal escape sequence. `read-output` and `read-last-lines` are non-interactive and return text, which is useful for scripting and automation.\n\n## Lifecycle and Recovery\n\nAgent shell sessions persist in Coastguard across page navigation. The scrollback buffer (up to 512KB) is replayed when you reconnect to a tab.\n\nWhen you stop a Coast instance with `coast stop`, all agent shell PTY processes are killed and their database records are cleaned up. `coast start` auto-spawns a fresh agent shell if `[agent_shell]` is configured.\n\nAfter a daemon restart, previously running agent shells will show as dead. The system detects this automatically — if the active shell is dead, the first live shell is promoted to active. 
If no shells are alive, spawn a new one with `coast agent-shell spawn --activate`.\n\n## Who This Is For\n\nAgent shells are designed for **products building first-party integrations** around Coasts — orchestration platforms, agent wrappers, and tools that want to manage containerized coding agents programmatically via the `input`, `read-output`, and `session-status` APIs.\n\nFor general-purpose parallel agent coding, run agents on the host. It is simpler, avoids OAuth issues, sidesteps credential mounting complexity, and takes full advantage of the shared filesystem. You get all the benefits of Coast (isolated runtimes, port management, worktree switching) without any of the agent containerization overhead.\n\nThe next level of complexity beyond agent shells is mounting [MCP servers](MCP_SERVERS.md) into the Coast so the containerized agent has access to tools. This compounds the integration surface further and is covered separately. The capability is there if you need it, but most users should not.\n", "concepts_and_terminology/ASSIGN.md": "# Assign and Unassign\n\nAssign and unassign control which worktree a Coast instance is pointed at. See [Filesystem](FILESYSTEM.md) for how worktree switching works at the mount level.\n\n## Assign\n\n`coast assign` switches a Coast instance to a specific worktree. 
Coast creates the worktree if it does not already exist, updates the code inside the Coast, and restarts services according to the configured assign strategy.\n\n```bash\ncoast assign dev-1 --worktree feature/oauth\n```\n\n```text\nBefore:\n┌─── dev-1 ──────────────────┐\n│ branch: main │\n│ worktree: - │\n└────────────────────────────┘\n\ncoast assign dev-1 --worktree feature/oauth\n\nAfter:\n┌─── dev-1 ──────────────────┐\n│ branch: feature/oauth │\n│ worktree: feature/oauth │\n│ │\n│ postgres → skipped (none) │\n│ web → hot swapped │\n│ api → restarted │\n│ worker → rebuilt │\n└────────────────────────────┘\n```\n\nAfter assigning, `dev-1` is running the `feature/oauth` branch with all its services up.\n\n## Unassign\n\n`coast unassign` switches a Coast instance back to the project root (your main/master branch). The worktree association is removed and the Coast returns to running off the primary repository.\n\n```text\ncoast unassign dev-1\n\n┌─── dev-1 ──────────────────┐\n│ branch: main │\n│ worktree: - │\n└────────────────────────────┘\n```\n\n## Assign Strategies\n\nWhen a Coast is assigned to a new worktree, each service needs to know how to handle the code change. You configure this per-service in your [Coastfile](COASTFILE_TYPES.md) under `[assign]`:\n\n```toml\n[assign]\ndefault = \"restart\"\n\n[assign.services]\npostgres = \"none\"\nredis = \"none\"\nweb = \"hot\"\nworker = \"rebuild\"\n```\n\n```text\ncoast assign dev-1 --worktree feature/billing\n\n postgres (strategy: none) → skipped, unchanged between branches\n redis (strategy: none) → skipped, unchanged between branches\n web (strategy: hot) → filesystem swapped, file watcher picks it up\n api (strategy: restart) → container restarted\n worker (strategy: rebuild) → image rebuilt, container restarted\n```\n\nThe available strategies are:\n\n- **none** — do nothing. Use this for services that do not change between branches, such as Postgres or Redis.\n- **hot** — swap the filesystem only. 
The service stays running and picks up changes via mount propagation and file watchers (e.g., a dev server with hot reload).\n- **restart** — restart the service container. Use this for interpreted services that just need a process restart. This is the default.\n- **rebuild** — rebuild the service image and restart. Use this when the branch change affects the `Dockerfile` or build-time dependencies.\n\nYou can also specify rebuild triggers so that a service only rebuilds when specific files change:\n\n```toml\n[assign.rebuild_triggers]\nworker = [\"Dockerfile\", \"package.json\"]\n```\n\nIf none of the trigger files changed between branches, the service skips the rebuild even if the strategy is set to `rebuild`.\n\n## Deleted Worktrees\n\nIf an assigned worktree is deleted, the `coastd` daemon automatically unassigns that instance back to the main Git repository root.\n\n---\n\n> **Tip: Reducing assign latency in large codebases**\n>\n> Under the hood, the first assign to a new worktree bootstraps selected gitignored files into that worktree, and services with `[assign.rebuild_triggers]` may run `git diff --name-only` to decide whether a rebuild is necessary. In large codebases, that bootstrap step and unnecessary rebuilds tend to dominate assign time.\n>\n> Use `exclude_paths` in your Coastfile to shrink the gitignored bootstrap surface, use `\"hot\"` for services with file watchers, and keep `[assign.rebuild_triggers]` focused on true build-time inputs. If you need to refresh the ignored-file bootstrap manually for an existing worktree, run `coast assign --force-sync`. See [Performance Optimizations](PERFORMANCE_OPTIMIZATIONS.md) for a full guide.\n", @@ -368,7 +373,7 @@ "concepts_and_terminology/COASTGUARD.md": "# Coastguard\n\nCoastguard is Coast's local web UI (think: Coast's Docker Desktop-style interface), running on port `31415`. 
It is launched from the CLI:\n\n```bash\ncoast ui\n```\n\n![Coastguard project overview](../../assets/coastguard-overview.png)\n*The project dashboard showing running Coast instances, their branches/worktrees, and checkout state.*\n\n![Coastguard port mappings](../../assets/coastguard-ports.png)\n*The ports page for a specific Coast instance, showing canonical and dynamic port mappings for each service.*\n\n## What Coastguard Is Good For\n\nCoastguard gives you a visual control and observability surface for your project:\n\n- See projects, instances, statuses, branches, and checkout state.\n- Inspect [port mappings](PORTS.md) and jump directly into services.\n- View [logs](LOGS.md), runtime stats, and inspect data.\n- Browse [builds](BUILDS.md), image artifacts, [volumes](VOLUMES.md), and [secrets](SECRETS.md) metadata.\n- Navigate docs in-app while working.\n\n## Relationship to CLI and Daemon\n\nCoastguard does not replace the CLI. It complements it as the human-facing interface.\n\n- [`coast` CLI](CLI.md) is the automation interface for scripts, agent workflows, and tooling integrations.\n- Coastguard is the human interface for visual inspection, interactive debugging, and day-to-day operational visibility.\n- Both are clients of [`coastd`](DAEMON.md), so they stay in sync.\n\n", "concepts_and_terminology/COASTS.md": "# Coasts\n\nA Coast is a self-contained runtime of your project. It runs inside a [Docker-in-Docker container](RUNTIMES_AND_SERVICES.md), and multiple services (your web server, database, cache, etc.) 
can all run inside a single Coast instance.\n\n```text\n┌─── Coast: dev-1 (branch: feature/oauth) ──────────────┐\n│ │\n│ ┌─────────┐ ┌──────────┐ ┌─────────┐ │\n│ │ web │ │ postgres │ │ redis │ │\n│ │ :3000 │ │ :5432 │ │ :6379 │ │\n│ └─────────┘ └──────────┘ └─────────┘ │\n│ │\n│ dynamic ports: 62217, 55681, 56905 │\n└───────────────────────────────────────────────────────┘\n\n┌─── Coast: dev-2 (branch: feature/billing) ────────────┐\n│ │\n│ ┌─────────┐ ┌──────────┐ ┌─────────┐ │\n│ │ web │ │ postgres │ │ redis │ │\n│ │ :3000 │ │ :5432 │ │ :6379 │ │\n│ └─────────┘ └──────────┘ └─────────┘ │\n│ │\n│ dynamic ports: 63104, 57220, 58412 │\n└───────────────────────────────────────────────────────┘\n```\n\nEach Coast exposes its own set of [dynamic ports](PORTS.md) to the host machine, meaning you can access any running Coast at any time regardless of what else is running.\n\nWhen you [check out](CHECKOUT.md) a Coast, the project's canonical ports are mapped to it — so `localhost:3000` hits the checked-out Coast rather than a dynamic port.\n\n```text\ncoast checkout dev-1\n\nlocalhost:3000 ──→ dev-1 web\nlocalhost:5432 ──→ dev-1 postgres\nlocalhost:6379 ──→ dev-1 redis\n\ncoast checkout dev-2 (instant swap)\n\nlocalhost:3000 ──→ dev-2 web\nlocalhost:5432 ──→ dev-2 postgres\nlocalhost:6379 ──→ dev-2 redis\n```\n\nTypically a Coast is [assigned to a specific worktree](ASSIGN.md). This is how you run multiple worktrees of the same project in parallel without port conflicts or volume collisions.\n\nYou create Coast instances with [`coast run`](RUN.md). It is up to you to spin Coasts up and down as you see fit. You probably would not want 20 Coasts of a memory-intensive project running at once, but to each their own.\n", "concepts_and_terminology/DAEMON.md": "# Coast Daemon\n\nThe Coast daemon (`coastd`) is the long-running local process that does the actual orchestration work. 
The [CLI](CLI.md) and [Coastguard](COASTGUARD.md) are clients; `coastd` is the control plane behind them.\n\n## Architecture at a Glance\n\n```text\ncoast CLI (automation) -----+\n +--> coastd daemon\nCoastguard UI (human) ------+ |\n +--> Coasts\n +--> Ports\n +--> State\n```\n\nThe CLI sends requests to `coastd` over a local Unix socket; Coastguard connects over a WebSocket. The daemon applies changes to runtime state.\n\n## What It Does\n\n`coastd` handles the operations that need persistent state and background coordination:\n\n- Tracks Coast instances, builds, and shared services.\n- Creates, starts, stops, and removes Coast runtimes.\n- Applies assign/unassign/checkout operations.\n- Manages canonical and dynamic [port forwarding](PORTS.md).\n- Streams [logs](LOGS.md), status, and runtime events to CLI and UI clients.\n\nIn short: if you run `coast run`, `coast assign`, `coast checkout`, or `coast ls`, the daemon is the component doing the work.\n\n## How It Runs\n\nYou can run the daemon in two common ways:\n\n```bash\n# Register daemon auto-start at login (recommended)\ncoast daemon install\n\n# Manual start mode\ncoast daemon start\n```\n\nIf you skip daemon install, you need to start it yourself each session before using Coast commands.\n\n## Reporting Bugs\n\nIf you run into issues, please include the `coastd` daemon logs when submitting a bug report. The logs contain the context needed to diagnose most problems:\n\n```bash\ncoast daemon logs\n```\n\n", - "concepts_and_terminology/EXEC_AND_DOCKER.md": "# Exec & Docker\n\n`coast exec` drops you into a shell inside the Coast's DinD container. Your working directory is `/workspace` — the [bind-mounted project root](FILESYSTEM.md) where your Coastfile lives. 
This is the primary way to run commands, inspect files, or debug services inside a Coast from your host machine.\n\n`coast docker` is the companion command for talking to the inner Docker daemon directly.\n\n## `coast exec`\n\nOpen a shell inside a Coast instance:\n\n```bash\ncoast exec dev-1\n```\n\nThis starts an `sh` session at `/workspace`. Coast containers are Alpine-based, so the default shell is `sh`, not `bash`.\n\nYou can also run a specific command without entering an interactive shell:\n\n```bash\ncoast exec dev-1 ls -la\ncoast exec dev-1 -- npm install\ncoast exec dev-1 -- go test ./...\n```\n\nEverything after the instance name is passed as the command. Use `--` to separate flags that belong to your command from flags that belong to `coast exec`.\n\n### Working Directory\n\nThe shell starts at `/workspace`, which is your host project root bind-mounted into the container. This means your source code, Coastfile, and all project files are right there:\n\n```text\n/workspace $ ls\nCoastfile README.md apps/ packages/\nCoastfile.light go.work infra/ scripts/\nCoastfile.snap go.work.sum package-lock.json\n```\n\nAny changes you make to files under `/workspace` are reflected on the host immediately — it is a bind mount, not a copy.\n\n### Interactive vs Non-Interactive\n\nWhen stdin is a TTY (you are typing at a terminal), `coast exec` bypasses the daemon entirely and runs `docker exec -it` directly for full TTY passthrough. This means colors, cursor movement, tab completion, and interactive programs all work as expected.\n\nWhen stdin is piped or scripted (CI, agent workflows, `coast exec dev-1 -- some-command | grep foo`), the request goes through the daemon and returns structured stdout, stderr, and an exit code.\n\n### File Permissions\n\nThe exec runs as your host user's UID:GID, so files created inside the Coast have the correct ownership on the host. 
No permission mismatches between host and container.\n\n## `coast docker`\n\nWhile `coast exec` gives you a shell in the DinD container itself, `coast docker` lets you run Docker CLI commands against the **inner** Docker daemon — the one managing your compose services.\n\n```bash\ncoast docker dev-1 # defaults to: docker ps\ncoast docker dev-1 ps # same as above\ncoast docker dev-1 compose ps # docker compose ps (inner services)\ncoast docker dev-1 images # list images in the inner daemon\ncoast docker dev-1 compose logs web # docker compose logs for a service\n```\n\nEvery command you pass is prefixed with `docker` automatically. So `coast docker dev-1 compose ps` runs `docker compose ps` inside the Coast container, talking to the inner daemon.\n\n### `coast exec` vs `coast docker`\n\nThe distinction is what you are targeting:\n\n| Command | Runs as | Target |\n|---|---|---|\n| `coast exec dev-1 ls /workspace` | `sh -c \"ls /workspace\"` in DinD container | The Coast container itself (your project files, installed tools) |\n| `coast docker dev-1 ps` | `docker ps` in DinD container | The inner Docker daemon (your compose service containers) |\n| `coast docker dev-1 compose logs web` | `docker compose logs web` in DinD container | A specific compose service's logs via the inner daemon |\n\nUse `coast exec` for project-level work — running tests, installing dependencies, inspecting files. Use `coast docker` when you need to see what the inner Docker daemon is doing — container status, images, networks, compose operations.\n\n## Coastguard Exec Tab\n\nThe Coastguard web UI provides a persistent interactive terminal connected over WebSocket.\n\n![Exec tab in Coastguard](../../assets/coastguard-exec.png)\n*The Coastguard Exec tab showing a shell session at /workspace inside a Coast instance.*\n\nThe terminal is powered by xterm.js and offers:\n\n- **Persistent sessions** — terminal sessions survive page navigation and browser refreshes. 
Reconnecting replays the scrollback buffer so you pick up where you left off.\n- **Multiple tabs** — open several shells at once. Each tab is an independent session.\n- **[Agent shell](AGENT_SHELLS.md) tabs** — spawn dedicated agent shells for AI coding agents, with active/inactive status tracking.\n- **Fullscreen mode** — expand the terminal to fill the screen (Escape to exit).\n\nBeyond the instance-level exec tab, Coastguard also provides terminal access at other levels:\n\n- **Service exec** — click into an individual service from the Services tab to get a shell inside that specific inner container (this does a double `docker exec` — first into the DinD container, then into the service container).\n- **[Shared service](SHARED_SERVICES.md) exec** — get a shell inside a host-level shared service container.\n- **Host terminal** — a shell on your host machine at the project root, without entering a Coast at all.\n\n## When to Use Which\n\n- **`coast exec`** — run project-level commands (npm install, go test, file inspection, debugging) inside the DinD container.\n- **`coast docker`** — inspect or manage the inner Docker daemon (container status, images, networks, compose operations).\n- **Coastguard Exec tab** — interactive debugging with persistent sessions, multiple tabs, and agent shell support. Best when you want to keep several terminals open while navigating the rest of the UI.\n- **`coast logs`** — for reading service output, use `coast logs` instead of `coast docker compose logs`. See [Logs](LOGS.md).\n- **`coast ps`** — for checking service status, use `coast ps` instead of `coast docker compose ps`. See [Runtimes and Services](RUNTIMES_AND_SERVICES.md).\n", + "concepts_and_terminology/EXEC_AND_DOCKER.md": "# Exec & Docker\n\n`coast exec` drops you into a shell inside the Coast's DinD container. Your working directory is `/workspace` — the [bind-mounted project root](FILESYSTEM.md) where your Coastfile lives. 
This is the primary way to run commands, inspect files, or debug services inside a Coast from your host machine.\n\n`coast docker` is the companion command for talking to the inner Docker daemon directly.\n\n## `coast exec`\n\nOpen a shell inside a Coast instance:\n\n```bash\ncoast exec dev-1\n```\n\nThis starts an `sh` session at `/workspace`. Coast containers are Alpine-based, so the default shell is `sh`, not `bash`.\n\nYou can also run a specific command without entering an interactive shell:\n\n```bash\ncoast exec dev-1 ls -la\ncoast exec dev-1 -- npm install\ncoast exec dev-1 -- go test ./...\ncoast exec dev-1 --service web\ncoast exec dev-1 --service web -- php artisan test\n```\n\nEverything after the instance name is passed as the command. Use `--` to separate flags that belong to your command from flags that belong to `coast exec`.\n\nPass `--service ` to target a specific compose service container instead of the outer Coast container. Pass `--root` when you need raw container-root access instead of Coast's default host UID:GID mapping.\n\n### Working Directory\n\nThe shell starts at `/workspace`, which is your host project root bind-mounted into the container. This means your source code, Coastfile, and all project files are right there:\n\n```text\n/workspace $ ls\nCoastfile README.md apps/ packages/\nCoastfile.light go.work infra/ scripts/\nCoastfile.snap go.work.sum package-lock.json\n```\n\nAny changes you make to files under `/workspace` are reflected on the host immediately — it is a bind mount, not a copy.\n\n### Interactive vs Non-Interactive\n\nWhen stdin is a TTY (you are typing at a terminal), `coast exec` bypasses the daemon entirely and runs `docker exec -it` directly for full TTY passthrough. 
This means colors, cursor movement, tab completion, and interactive programs all work as expected.\n\nWhen stdin is piped or scripted (CI, agent workflows, `coast exec dev-1 -- some-command | grep foo`), the request goes through the daemon and returns structured stdout, stderr, and an exit code.\n\n### File Permissions\n\nThe exec runs as your host user's UID:GID, so files created inside the Coast have the correct ownership on the host. No permission mismatches between host and container.\n\n## `coast docker`\n\nWhile `coast exec` gives you a shell in the DinD container itself, `coast docker` lets you run Docker CLI commands against the **inner** Docker daemon — the one managing your compose services.\n\n```bash\ncoast docker dev-1 # defaults to: docker ps\ncoast docker dev-1 ps # same as above\ncoast docker dev-1 compose ps # docker compose ps for the active Coast-managed stack\ncoast docker dev-1 images # list images in the inner daemon\ncoast docker dev-1 compose logs web # docker compose logs for a service\n```\n\nEvery command you pass is prefixed with `docker` automatically. So `coast docker dev-1 compose ps` runs `docker compose ps` inside the Coast container, talking to the inner daemon.\n\n### `coast exec` vs `coast docker`\n\nThe distinction is what you are targeting:\n\n| Command | Runs as | Target |\n|---|---|---|\n| `coast exec dev-1 ls /workspace` | `sh -c \"ls /workspace\"` in DinD container | The Coast container itself (your project files, installed tools) |\n| `coast exec dev-1 --service web` | `docker exec ... 
sh` in the resolved inner service container | A specific compose service container |\n| `coast docker dev-1 ps` | `docker ps` in DinD container | The inner Docker daemon (your compose service containers) |\n| `coast docker dev-1 compose logs web` | `docker compose logs web` in DinD container | A specific compose service's logs via the inner daemon |\n\nUse `coast exec` for project-level work — running tests, installing dependencies, inspecting files. Use `coast docker` when you need to see what the inner Docker daemon is doing — container status, images, networks, compose operations.\n\n## Coastguard Exec Tab\n\nThe Coastguard web UI provides a persistent interactive terminal connected over WebSocket.\n\n![Exec tab in Coastguard](../../assets/coastguard-exec.png)\n*The Coastguard Exec tab showing a shell session at /workspace inside a Coast instance.*\n\nThe terminal is powered by xterm.js and offers:\n\n- **Persistent sessions** — terminal sessions survive page navigation and browser refreshes. Reconnecting replays the scrollback buffer so you pick up where you left off.\n- **Multiple tabs** — open several shells at once. 
Each tab is an independent session.\n- **[Agent shell](AGENT_SHELLS.md) tabs** — spawn dedicated agent shells for AI coding agents, with active/inactive status tracking.\n- **Fullscreen mode** — expand the terminal to fill the screen (Escape to exit).\n\nBeyond the instance-level exec tab, Coastguard also provides terminal access at other levels:\n\n- **Service exec** — click into an individual service from the Services tab to get a shell inside that specific inner container (this does a double `docker exec` — first into the DinD container, then into the service container).\n- **[Shared service](SHARED_SERVICES.md) exec** — get a shell inside a host-level shared service container.\n- **Host terminal** — a shell on your host machine at the project root, without entering a Coast at all.\n\n## When to Use Which\n\n- **`coast exec`** — run project-level commands inside the DinD container, or pass `--service` to open a shell or run a command inside a specific compose service container.\n- **`coast docker`** — inspect or manage the inner Docker daemon (container status, images, networks, compose operations).\n- **Coastguard Exec tab** — interactive debugging with persistent sessions, multiple tabs, and agent shell support. Best when you want to keep several terminals open while navigating the rest of the UI.\n- **`coast logs`** — for reading service output, use `coast logs` instead of `coast docker compose logs`. See [Logs](LOGS.md).\n- **`coast ps`** — for checking service status, use `coast ps` instead of `coast docker compose ps`. See [Runtimes and Services](RUNTIMES_AND_SERVICES.md).\n", "concepts_and_terminology/FILESYSTEM.md": "# Filesystem\n\nYour host machine and every Coast instance share the same project files. The host project root is mounted read-write into the DinD container at `/host-project`, and Coast bind-mounts the active working tree at `/workspace`. 
This is what makes it possible for an agent running on your host machine to edit code while services inside the Coast pick up the changes in real time.\n\n## The Shared Mount\n\n```text\nHost machine\n│\n├── ~/dev/my-app/ (project root)\n│ ├── src/\n│ ├── Coastfile\n│ ├── docker-compose.yml\n│ └── .worktrees/ (worktrees, gitignored)\n│ ├── feature-auth/\n│ └── feature-billing/\n│\n└── Docker daemon (host)\n │\n └── Coast: dev-1 (docker:dind)\n │\n ├── /host-project ← Docker bind mount of project root (RW, fixed)\n │\n ├── /workspace ← mount --bind /host-project (switchable)\n │ ├── src/ same files, same bytes, instant sync\n │ ├── Coastfile\n │ └── docker-compose.yml\n │\n └── Inner Docker daemon\n └── web service\n └── /app ← compose bind mount from /workspace/src\n```\n\nThe host project root is mounted read-write at `/host-project` inside the [DinD container](RUNTIMES_AND_SERVICES.md) when the container is created. After the container starts, an in-container `mount --bind /host-project /workspace` creates the working `/workspace` path with shared mount propagation (`mount --make-rshared`), so inner compose services that bind-mount subdirectories of `/workspace` see the correct content.\n\nThis two-stage approach exists for a reason: the Docker bind mount at `/host-project` is fixed at container creation and cannot be changed without recreating the container. But the Linux bind mount at `/workspace` inside the container can be unmounted and re-bound to a different subdirectory — a worktree — without touching the container lifecycle. This is what makes `coast assign` fast.\n\n`/workspace` is read-write. File changes flow both directions instantly. Save a file on the host and a dev server inside the Coast picks it up. Create a file inside the Coast and it appears on the host.\n\n## Host Agents and Coast\n\n```text\n┌─── Host machine ──────────────────────────────────────────┐\n│ │\n│ AI Agent (Cursor, Claude Code, etc.) 
│\n│ │ │\n│ ├── reads/writes files at /src/ │\n│ │ ↕ (instant, same filesystem) │\n│ ├── coast logs dev-1 --service web --tail 50 │\n│ ├── coast ps dev-1 │\n│ └── coast exec dev-1 -- npm test │\n│ │\n├───────────────────────────────────────────────────────────┤\n│ │\n│ Coast: dev-1 │\n│ └── /workspace/src/ ← same bytes as host project/src │\n│ └── web service picks up changes on save │\n│ │\n└───────────────────────────────────────────────────────────┘\n```\n\nBecause the filesystem is shared, an AI coding agent running on the host can edit files freely and the running services inside the Coast see the changes immediately. The agent does not need to run inside the Coast container — it operates from the host as normal.\n\nWhen the agent needs runtime information — logs, service status, test output — it calls Coast CLI commands from the host:\n\n- `coast logs dev-1 --service web --tail 50` for service output (see [Logs](LOGS.md))\n- `coast ps dev-1` for service status (see [Runtimes and Services](RUNTIMES_AND_SERVICES.md))\n- `coast exec dev-1 -- npm test` to run commands inside the Coast (see [Exec & Docker](EXEC_AND_DOCKER.md))\n\nThis is the fundamental architectural advantage: **code editing happens on the host, runtime happens in the Coast, and the shared filesystem bridges them.** The host agent never needs to be \"inside\" the Coast to do its work.\n\n## Worktree Switching\n\nWhen `coast assign` switches a Coast to a different worktree, it remounts `/workspace` to point at that git worktree instead of the project root:\n\n```text\ncoast assign dev-1 --worktree feature-auth\n\nBefore: /workspace ←──mount── /host-project (project root)\nAfter: /workspace ←──mount── /host-project/.worktrees/feature-auth (worktree)\n```\n\nThe worktree is created on the host at `{project_root}/.worktrees/{worktree_name}`. 
The `.worktrees` directory name is configurable via `worktree_dir` in your Coastfile and should be in your `.gitignore`.\n\nIf the worktree is new, Coast bootstraps selected gitignored files from the project root before the remount. It enumerates ignored files with `git ls-files --others --ignored --exclude-standard`, filters out common heavy directories plus any configured `exclude_paths`, then uses `rsync --files-from` with `--link-dest` to hardlink the selected files into the worktree. Coast records that bootstrap in internal worktree metadata and skips it on later assigns to the same worktree unless you explicitly refresh it with `coast assign --force-sync`.\n\nInside the container, `/workspace` is lazy-unmounted and re-bound to the worktree subdirectory at `/host-project/.worktrees/{branch_name}`. This remount is fast — it does not recreate the DinD container or restart the inner Docker daemon. Compose and bare services may still be recreated or restarted after the remount so their bind mounts resolve through the new `/workspace`.\n\nLarge dependency directories such as `node_modules` are not part of this generic bootstrap path. Those are typically handled through service-specific caches or volumes instead.\n\nIf you use `[assign.rebuild_triggers]`, Coast also runs `git diff --name-only <base>..<head>` on the host to decide whether a service marked `rebuild` can be downgraded to `restart`. See [Assign and Unassign](ASSIGN.md) and [Performance Optimizations](PERFORMANCE_OPTIMIZATIONS.md) for the details that affect assign latency.\n\n`coast unassign` reverts `/workspace` back to `/host-project` (the project root). `coast start` after a stop re-applies the correct mount based on whether the instance has an assigned worktree.\n\n## All Mounts\n\nEvery Coast container has these mounts:\n\n| Path | Type | Access | Purpose |\n|---|---|---|---|\n| `/workspace` | bind mount (in-container) | RW | Project root or worktree. Switchable on assign. 
|\n| `/host-project` | Docker bind mount | RW | Raw project root. Fixed at container creation. |\n| `/image-cache` | Docker bind mount | RO | Pre-pulled OCI tarballs from `~/.coast/image-cache/`. |\n| `/coast-artifact` | Docker bind mount | RO | Build artifact with rewritten compose files. |\n| `/coast-override` | Docker bind mount | RO | Generated compose overrides for [shared services](SHARED_SERVICES.md). |\n| `/var/lib/docker` | Named volume | RW | Inner Docker daemon state. Persists across container removal. |\n\nThe read-only mounts are infrastructure — they carry the build artifact, cached images, and compose overrides that Coast generates. You interact with them indirectly through `coast build` and the Coastfile. The read-write mounts are where your code lives and where the inner daemon stores its state.\n", "concepts_and_terminology/LOGS.md": "# Logs\n\nServices inside a Coast run in nested containers — your compose services are managed by an inner Docker daemon inside a DinD container. This means host-level logging tools cannot see them. If your workflow includes a logging MCP that reads Docker logs on the host, it will only see the outer DinD container, not the web server, database, or worker running inside it.\n\nThe solution is `coast logs`. Any agent or tool that needs to read service output from a Coast instance must use the Coast CLI instead of host-level Docker log access.\n\n## The MCP Tradeoff\n\nIf you are using an AI agent with a logging MCP (a tool that captures Docker container logs from your host — see [MCP Servers](MCP_SERVERS.md)), that MCP will not work for services running inside a Coast. 
The host Docker daemon sees one container per Coast instance — the DinD container — and its logs are just the inner Docker daemon's startup output.\n\nTo capture the actual service logs, instruct your agent to use:\n\n```bash\ncoast logs <instance> --service <service> --tail <n>\n```\n\nFor example, if your agent needs to inspect why a backend service is failing:\n\n```bash\ncoast logs dev-1 --service backend --tail 100\n```\n\nThis is the equivalent of `docker compose logs` but routed through the Coast daemon into the inner DinD container. If you have agent rules or system prompts that reference a logging MCP, you will need to add an instruction that overrides this behavior when working inside a Coast.\n\n## `coast logs`\n\nThe CLI provides several ways to read logs from a Coast instance:\n\n```bash\ncoast logs dev-1                            # last 200 lines, all services\ncoast logs dev-1 --service web              # last 200 lines, web only\ncoast logs dev-1 --tail 50                  # last 50 lines, then follow\ncoast logs dev-1 --tail                     # all lines, then follow\ncoast logs dev-1 --service backend -f       # follow mode (stream new entries)\ncoast logs dev-1 --service web --tail 100   # last 100 lines + follow\n```\n\nWithout `--tail` or `-f`, the command returns the last 200 lines and exits. With `--tail`, it streams the requested number of lines and then continues following new output in real time. 
`-f` / `--follow` enables follow mode on its own.\n\nThe output uses the compose log format with a service prefix on each line:\n\n```text\nweb | 2026/02/28 01:49:34 Listening on :3000\nbackend | 2026/02/28 01:49:34 [INFO] Server started on :8080\nbackend | 2026/02/28 01:49:34 [ProcessCreditsJob] starting at 2026-02-28T01:49:34Z\nredis | 1:M 28 Feb 2026 01:49:30.123 * Ready to accept connections\n```\n\nYou can also filter by service with the legacy positional syntax (`coast logs dev-1 web`), but the `--service` flag is preferred.\n\n## Coastguard Logs Tab\n\nThe Coastguard web UI provides a richer log viewing experience with real-time streaming over WebSocket.\n\n![Logs tab in Coastguard](../../assets/coastguard-logs.png)\n*The Coastguard Logs tab streaming backend service output with service filtering and search.*\n\nThe Logs tab offers:\n\n- **Real-time streaming** — logs arrive over a WebSocket connection as they are produced, with a status indicator showing connection state.\n- **Service filter** — a dropdown populated from the log stream's service prefixes. Select a single service to focus on its output.\n- **Search** — filter displayed lines by text or regex (toggle the asterisk button for regex mode). Matching terms are highlighted.\n- **Line counts** — shows filtered lines vs total lines (e.g. \"200 / 971 lines\").\n- **Clear** — truncates the inner container log files and resets the viewer.\n- **Fullscreen** — expand the log viewer to fill the screen.\n\nLog lines are rendered with ANSI color support, log level highlighting (ERROR in red, WARN in amber, INFO in blue, DEBUG in gray), timestamp dimming, and colored service badges for visual distinction between services.\n\nShared services running on the host daemon have their own log viewer accessible from the Shared Services tab. 
See [Shared Services](SHARED_SERVICES.md) for details.\n\n## How It Works\n\nWhen you run `coast logs`, the daemon executes `docker compose logs` inside the DinD container via `docker exec` and streams the output back to your terminal (or to the Coastguard UI over WebSocket).\n\n```text\ncoast logs dev-1 --service web --tail 50\n │\n ├── CLI sends LogsRequest to daemon (Unix socket)\n │\n ├── Daemon resolves instance → container ID\n │\n ├── Daemon exec's into DinD container:\n │ docker compose logs --tail 50 --follow web\n │\n └── Output streams back chunk by chunk\n └── CLI prints to stdout / Coastguard renders in UI\n```\n\nFor [bare services](BARE_SERVICES.md), the daemon tails the log files at `/var/log/coast-services/` instead of calling `docker compose logs`. The output format is the same (`service | line`) so service filtering works identically in both cases.\n\n## Related Commands\n\n- `coast ps <instance>` — check which services are running and their status. See [Runtimes and Services](RUNTIMES_AND_SERVICES.md).\n- [`coast exec <instance>`](EXEC_AND_DOCKER.md) — open a shell inside the Coast container for manual debugging.\n", "concepts_and_terminology/LOOKUP.md": "# Lookup\n\n`coast lookup` discovers which Coast instances are running for the caller's current working directory. It is the first command a host-side agent should run to orient itself — \"I'm editing code here, which Coast(s) should I interact with?\"\n\n```bash\ncoast lookup\n```\n\nLookup detects whether you are inside a [worktree](ASSIGN.md) or at the project root, queries the daemon for matching instances, and prints the results with ports, URLs, and example commands.\n\n## Why This Exists\n\nAn AI coding agent running on the host (Cursor, Claude Code, Codex, etc.) edits files through the [shared filesystem](FILESYSTEM.md) and calls Coast CLI commands for runtime operations. 
But the agent first needs to answer a basic question: **which Coast instance corresponds to the directory I'm working in?**\n\nWithout `coast lookup`, the agent would have to run `coast ls`, parse the full instance table, figure out which worktree it's in, and cross-reference. `coast lookup` does all of that in one step and returns structured output that agents can consume directly.\n\nThis command should be included in any top-level SKILL.md, AGENTS.md, or rules file for agent workflows that use Coast. It is the entry point for an agent to discover its runtime context.\n\n## Output Modes\n\n### Default (human-readable)\n\n```bash\ncoast lookup\n```\n\n```text\nCoast instances for worktree feature/oauth (my-app):\n\n dev-1 running ★ checked out\n\n Primary URL: http://dev-1.localhost:62217\n\n SERVICE CANONICAL DYNAMIC\n ★ web 3000 62217\n api 8080 63889\n postgres 5432 55681\n\n Examples (exec starts at the workspace root where your Coastfile is, cd to your target directory first):\n coast exec dev-1 -- sh -c \"cd <dir> && <command>\"\n coast logs dev-1 --service <service>\n coast ps dev-1\n```\n\nThe examples section reminds agents (and humans) that `coast exec` starts at the workspace root — the directory where the Coastfile lives. To run a command in a subdirectory, `cd` to it inside the exec.\n\n### Compact (`--compact`)\n\nReturns a JSON array of instance names. Designed for scripts and agent tooling that just needs to know which instances to target.\n\n```bash\ncoast lookup --compact\n```\n\n```text\n[\"dev-1\"]\n```\n\nMultiple instances on the same worktree:\n\n```text\n[\"dev-1\",\"dev-2\"]\n```\n\nNo matches:\n\n```text\n[]\n```\n\n### JSON (`--json`)\n\nReturns the full structured response as pretty-printed JSON. 
Designed for agents that need ports, URLs, and status in a machine-readable format.\n\n```bash\ncoast lookup --json\n```\n\n```json\n{\n \"project\": \"my-app\",\n \"worktree\": \"feature/oauth\",\n \"project_root\": \"/Users/dev/my-app\",\n \"instances\": [\n {\n \"name\": \"dev-1\",\n \"status\": \"Running\",\n \"checked_out\": true,\n \"branch\": \"feature/oauth\",\n \"primary_url\": \"http://dev-1.localhost:62217\",\n \"ports\": [\n { \"logical_name\": \"web\", \"canonical_port\": 3000, \"dynamic_port\": 62217, \"is_primary\": true },\n { \"logical_name\": \"api\", \"canonical_port\": 8080, \"dynamic_port\": 63889, \"is_primary\": false }\n ]\n }\n ]\n}\n```\n\n## How It Resolves\n\nLookup walks up from the current working directory to find the nearest Coastfile, then determines which worktree you are in:\n\n1. If your cwd is under `{project_root}/{worktree_dir}/{name}/...`, lookup finds instances assigned to that worktree.\n2. If your cwd is the project root (or any directory not inside a worktree), lookup finds instances with **no worktree assigned** — those still pointed at the project root.\n\nThis means lookup works from subdirectories too. If you are in `my-app/.worktrees/feature-oauth/src/api/`, lookup still resolves `feature-oauth` as the worktree.\n\n## Exit Codes\n\n| Code | Meaning |\n|------|---------|\n| 0 | One or more matching instances found |\n| 1 | No matching instances (empty result) |\n\nThis makes lookup usable in shell conditionals:\n\n```bash\nif coast lookup > /dev/null 2>&1; then\n coast exec dev-1 -- sh -c \"cd src && npm test\"\nfi\n```\n\n## For Agent Workflows\n\nThe typical agent integration pattern:\n\n1. Agent starts working in a worktree directory.\n2. Agent runs `coast lookup` to discover instance names, ports, URLs, and example commands.\n3. Agent uses the instance name for all subsequent Coast commands: `coast exec`, `coast logs`, `coast ps`.\n\n```text\n┌─── Agent (host machine) ────────────────────────────┐\n│ │\n│ 1. 
coast lookup │\n│ → instance names, ports, URLs, examples │\n│ 2. coast exec dev-1 -- sh -c \"cd src && npm test\" │\n│ 3. coast logs dev-1 --service web --tail 50 │\n│ 4. coast ps dev-1 │\n│ │\n└──────────────────────────────────────────────────────┘\n```\n\nIf the agent is working across multiple worktrees, it runs `coast lookup` from each worktree directory to resolve the correct instance for each context.\n\nSee also [Filesystem](FILESYSTEM.md) for how host agents interact with Coast, [Assign and Unassign](ASSIGN.md) for worktree concepts, and [Exec & Docker](EXEC_AND_DOCKER.md) for running commands inside a Coast.\n", @@ -384,18 +389,20 @@ "concepts_and_terminology/SHARED_SERVICES.md": "# Shared Services\n\nShared services are database and infrastructure containers (Postgres, Redis, MongoDB, etc.) that run on your host Docker daemon rather than inside a Coast. Coast instances connect to them over a bridge network, so every Coast talks to the same service on the same host volume.\n\n![Shared services in Coastguard](../../assets/coastguard-shared-services.png)\n*The Coastguard shared services tab showing host-managed Postgres, Redis, and MongoDB.*\n\n## How They Work\n\nWhen you declare a shared service in your Coastfile, Coast starts it on the host daemon and removes it from the compose stack that runs inside each Coast container. 
Coasts are then configured to route service-name traffic back to the shared container while preserving the service's container-side port inside the Coast.\n\n```text\nHost Docker daemon\n |\n +--> postgres (host volume: infra_postgres_data)\n +--> redis (host volume: infra_redis_data)\n +--> mongodb (host volume: infra_mongodb_data)\n |\n +--> Coast: dev-1 --bridge network--> host postgres, redis, mongodb\n +--> Coast: dev-2 --bridge network--> host postgres, redis, mongodb\n```\n\nBecause shared services reuse your existing host volumes, any data you already have from running `docker-compose up` locally is immediately available to your Coasts.\n\nThis distinction matters when you use mapped ports:\n\n```toml\n[shared_services.postgis]\nimage = \"ghcr.io/baosystems/postgis:12-3.3\"\nports = [\"5433:5432\"]\n```\n\n- On the host, the shared service is published on `localhost:5433`.\n- Inside every Coast, app containers still connect to `postgis:5432`.\n- A bare integer like `5432` is shorthand for the identity mapping `\"5432:5432\"`.\n\n## When to Use Shared Services\n\n- Your project has MCP integrations that connect to a local database — shared services let those continue to work without dynamic port discovery. If you publish the shared service on the same host port your tools already use (for example `ports = [5432]`), those tools keep working unchanged. If you publish it on a different host port (for example `\"5433:5432\"`), host-side tools should use that host port while Coasts continue using the container port.\n- You want lighter Coast instances since they do not need to run their own database containers.\n- You do not need data isolation between Coast instances (every instance sees the same data).\n- You are running coding agents on the host (see [Filesystem](FILESYSTEM.md)) and want them to access database state without routing through [`coast exec`](EXEC_AND_DOCKER.md). 
With shared services, the agent's existing database tools and MCPs work unchanged.\n\nSee the [Volume Topology](VOLUMES.md) page for alternatives when you do need isolation.\n\n## Volume Disambiguation Warning\n\nDocker volume names are not always globally unique. If you run `docker-compose up` from multiple different projects, the host volumes that Coast attaches to shared services may not be the ones you expect.\n\nBefore starting Coasts with shared services, make sure the last `docker-compose up` you ran was from the project you intend to use with Coasts. This ensures the host volumes match what your Coastfile expects.\n\n## Troubleshooting\n\nIf your shared services appear to be pointing at the wrong host volume:\n\n1. Open the [Coastguard](COASTGUARD.md) UI (`coast ui`).\n2. Navigate to the **Shared Services** tab.\n3. Select the affected services and click **Remove**.\n4. Click **Refresh Shared Services** to recreate them from your current Coastfile configuration.\n\nThis tears down and recreates the shared service containers, reattaching them to the correct host volumes.\n", "concepts_and_terminology/TROUBLESHOOTING.md": "# Troubleshooting\n\nMost issues with Coasts come from stale state, orphaned Docker resources, or a daemon that got out of sync. This page covers the escalation path from mild to nuclear.\n\n## Doctor\n\nIf things feel off — instances show as running but nothing responds, ports seem stuck, or the UI shows stale data — start with `coast doctor`:\n\n```bash\ncoast doctor\n```\n\nDoctor scans the state database and Docker for inconsistencies: orphaned instance records with missing containers, dangling containers with no state record, and shared services marked running that are actually dead. 
It fixes what it finds automatically.\n\nTo preview what it would do without changing anything:\n\n```bash\ncoast doctor --dry-run\n```\n\n## Daemon Restart\n\nIf the daemon itself seems unresponsive or you suspect it is in a bad state, restart it:\n\n```bash\ncoast daemon restart\n```\n\nThis sends a graceful shutdown signal, waits for the daemon to exit, and starts a fresh process. Your instances and state are preserved.\n\n## Removing a Single Project\n\nIf the problem is isolated to one project, you can remove its build artifacts and associated Docker resources without affecting anything else:\n\n```bash\ncoast rm-build my-project\n```\n\nThis deletes the project's artifact directory, Docker images, volumes, and containers. It asks for confirmation first. Pass `--force` to skip the prompt.\n\n## Missing Shared Service Images\n\nIf `coast run` fails while creating a shared service with an error like `No such image: postgres:15`, the image is missing from your host Docker daemon.\n\nThis most commonly happens when your `Coastfile` defines `shared_services` such as Postgres or Redis and Docker has not pulled those images yet.\n\nPull the missing image, then run the instance again:\n\n```bash\ndocker pull postgres:15\ndocker pull redis:7\ncoast run my-instance\n```\n\nIf you are not sure which image is missing, the failing `coast run` output will include the image name in the Docker error. After a failed provisioning attempt, Coasts cleans up the partial instance automatically, so seeing the instance return to `stopped` is expected.\n\n## Factory Reset with Nuke\n\nWhen nothing else works — or you just want a completely clean slate — `coast nuke` performs a full factory reset:\n\n```bash\ncoast nuke\n```\n\nThis will:\n\n1. Stop the `coastd` daemon.\n2. Remove **all** coast-managed Docker containers.\n3. Remove **all** coast-managed Docker volumes.\n4. Remove **all** coast-managed Docker networks.\n5. Remove **all** coast Docker images.\n6. 
Delete the entire `~/.coast/` directory (state database, builds, logs, secrets, image cache).\n7. Recreate `~/.coast/` and restart the daemon so coast is immediately usable again.\n\nBecause this destroys everything, you must type `nuke` at the confirmation prompt:\n\n```text\n$ coast nuke\nWARNING: This will permanently destroy ALL coast data:\n\n - Stop the coastd daemon\n - Remove all coast-managed Docker containers\n - Remove all coast-managed Docker volumes\n - Remove all coast-managed Docker networks\n - Remove all coast Docker images\n - Delete ~/.coast/ (state DB, builds, logs, secrets, image cache)\n\nType \"nuke\" to confirm:\n```\n\nPass `--force` to skip the prompt (useful in scripts):\n\n```bash\ncoast nuke --force\n```\n\nAfter a nuke, coast is ready to use — the daemon is running and the home directory exists. You just need to `coast build` and `coast run` your projects again.\n\n## Reporting Bugs\n\nIf you hit a problem that is not resolved by any of the above, include the daemon logs when reporting:\n\n```bash\ncoast daemon logs\n```\n", "concepts_and_terminology/VOLUMES.md": "# Volume Topology\n\nCoast provides three volume strategies that control how data-heavy services (databases, caches, etc.) store and share their data across Coast instances. Choosing the right strategy depends on how much isolation you need and how much overhead you can tolerate.\n\n## Shared Services\n\n[Shared services](SHARED_SERVICES.md) run on your host Docker daemon, outside of any Coast container. 
Services like Postgres, MongoDB, and Redis stay on the host machine and Coast instances route their calls back to the host over a bridge network.\n\n```text\nHost machine\n |\n +--> Postgres (host daemon, existing volume)\n +--> Redis (host daemon, existing volume)\n |\n +--> Coast: dev-1 --connects to--> host Postgres, host Redis\n +--> Coast: dev-2 --connects to--> host Postgres, host Redis\n```\n\nThere is no data isolation between instances — every Coast talks to the same database. In return you get:\n\n- Lighter Coast instances since they do not run their own database containers.\n- Your existing host volumes are reused directly, so any data you already have is available immediately.\n- MCP integrations that connect to your local database continue to work out of the box.\n\nThis is configured in your [Coastfile](COASTFILE_TYPES.md) under `[shared_services]`.\n\n## Shared Volumes\n\nShared volumes mount a single Docker volume that is shared across all Coast instances. The services themselves (Postgres, Redis, etc.) run inside each Coast container, but they all read and write to the same underlying volume.\n\n```text\nCoast: dev-1 --mounts--> shared volume \"my-project-postgres\"\nCoast: dev-2 --mounts--> shared volume \"my-project-postgres\"\n```\n\nThis isolates your Coast data from whatever is on your host machine, but instances still share data with each other. This is useful when you want a clean separation from your host development environment without the overhead of per-instance volumes.\n\n```toml\n[volumes.postgres_data]\nstrategy = \"shared\"\nservice = \"postgres\"\nmount = \"/var/lib/postgresql/data\"\n```\n\n## Isolated Volumes\n\nIsolated volumes give each Coast instance its own independent volume. No data is shared between instances or with the host. 
Each instance starts empty (or from a snapshot — see below) and diverges independently.\n\n```text\nCoast: dev-1 --mounts--> volume \"dev-1-postgres\"\nCoast: dev-2 --mounts--> volume \"dev-2-postgres\"\n```\n\nThis is the best choice for projects that are integration-test heavy and need true volume isolation between parallel environments. The tradeoff is slower startup and larger Coast builds since each instance maintains its own copy of the data.\n\n```toml\n[volumes.postgres_data]\nstrategy = \"isolated\"\nservice = \"postgres\"\nmount = \"/var/lib/postgresql/data\"\n```\n\n## Snapshotting\n\nBoth the shared and isolated strategies start with empty volumes by default. If you want instances to start with a copy of an existing host volume, set `snapshot_source` to the name of the Docker volume to copy from:\n\n```toml\n[volumes.postgres_data]\nstrategy = \"isolated\"\nsnapshot_source = \"infra_postgres_data\"\nservice = \"postgres\"\nmount = \"/var/lib/postgresql/data\"\n```\n\nThe snapshot is taken at [build time](BUILDS.md). After creation, each instance's volume diverges independently — mutations do not propagate back to the source or to other instances.\n\nCoast does not yet support runtime snapshotting (e.g., snapshotting a volume from a running instance). This is planned for a future release.\n", - "harnesses/README.md": "# Harnesses\n\nEach harness creates git worktrees in a different location. In Coasts, the\n[`worktree_dir`](../coastfiles/WORKTREE_DIR.md) array tells it where to look --\nincluding external paths like `~/.codex/worktrees` that require additional\nbind mounts.\n\nEach harness also has its own conventions for project-level instructions, skills, and commands. The matrix below shows what each harness supports so you know where to put guidance for Coasts. 
Each page covers the Coastfile configuration, the recommended file layout, and any caveats specific to that harness.\n\nIf one repo is used from multiple harnesses, see [Multiple Harnesses](MULTIPLE_HARNESSES.md).\n\n| Harness | Worktree location | Project instructions | Skills | Commands | Page |\n|---------|-------------------|----------------------|--------|----------|------|\n| OpenAI Codex | `~/.codex/worktrees` | `AGENTS.md` | `.agents/skills/` | Skills surface as `/` commands | [Codex](CODEX.md) |\n| Claude Code | `.claude/worktrees` | `CLAUDE.md` | `.claude/skills/` | `.claude/commands/` | [Claude Code](CLAUDE_CODE.md) |\n| Cursor | `~/.cursor/worktrees/` | `AGENTS.md` or `.cursor/rules/` | `.cursor/skills/` or `.agents/skills/` | `.cursor/commands/` | [Cursor](CURSOR.md) |\n| Conductor | `~/conductor/workspaces/` | `CLAUDE.md` | -- | -- | [Conductor](CONDUCTOR.md) |\n| T3 Code | `~/.t3/worktrees/` | `AGENTS.md` | `.agents/skills/` | -- | [T3 Code](T3_CODE.md) |\n\n## Skills vs Commands\n\nSkills and commands both let you define a reusable `/coasts` workflow. You can use either or both, depending on what the harness supports.\n\nIf your harness supports commands and you want an explicit `/coasts`\nentrypoint, one simple option is to add a command that reuses the skill.\nCommands are explicitly invoked by name, so you know exactly when the\n`/coasts` workflow runs. Skills can also be loaded automatically by the agent\nbased on context, which is useful but means you have less control over when the\ninstructions are pulled in.\n\nYou can use both. If you do, let the command reuse the skill instead of\nmaintaining a separate copy of the workflow.\n\nIf the harness only supports skills (T3 Code), use a skill. If it supports\nneither (Conductor), put the `/coasts` workflow directly in the project\ninstructions file.\n", + "harnesses/README.md": "# Harnesses\n\nEach harness creates git worktrees in a different location. 
In Coasts, the\n[`worktree_dir`](../coastfiles/WORKTREE_DIR.md) array tells it where to look --\nincluding external paths like `~/.codex/worktrees` that require additional\nbind mounts.\n\nEach harness also has its own conventions for project-level instructions, skills, and commands. The matrix below shows what each harness supports so you know where to put guidance for Coasts. Each page covers the Coastfile configuration, the recommended file layout, and any caveats specific to that harness.\n\nIf one repo is used from multiple harnesses, see [Multiple Harnesses](MULTIPLE_HARNESSES.md).\n\n| Harness | Worktree location | Project instructions | Skills | Commands | Page |\n|---------|-------------------|----------------------|--------|----------|------|\n| OpenAI Codex | `~/.codex/worktrees` | `AGENTS.md` | `.agents/skills/` | Skills surface as `/` commands | [Codex](CODEX.md) |\n| Claude Code | `.claude/worktrees` | `CLAUDE.md` | `.claude/skills/` | `.claude/commands/` | [Claude Code](CLAUDE_CODE.md) |\n| Cursor | `~/.cursor/worktrees/` | `AGENTS.md` or `.cursor/rules/` | `.cursor/skills/` or `.agents/skills/` | `.cursor/commands/` | [Cursor](CURSOR.md) |\n| Conductor | `~/conductor/workspaces/` | `CLAUDE.md` | -- | -- | [Conductor](CONDUCTOR.md) |\n| T3 Code | `~/.t3/worktrees/` | `AGENTS.md` | `.agents/skills/` | -- | [T3 Code](T3_CODE.md) |\n| Shep | `~/.shep/repos/*/wt` | `CLAUDE.md` | `.agents/skills/` or `.claude/skills/` | -- | [Shep](SHEP.md) |\n\n## Skills vs Commands\n\nSkills and commands both let you define a reusable `/coasts` workflow. You can use either or both, depending on what the harness supports.\n\nIf your harness supports commands and you want an explicit `/coasts`\nentrypoint, one simple option is to add a command that reuses the skill.\nCommands are explicitly invoked by name, so you know exactly when the\n`/coasts` workflow runs. 
Skills can also be loaded automatically by the agent\nbased on context, which is useful but means you have less control over when the\ninstructions are pulled in.\n\nYou can use both. If you do, let the command reuse the skill instead of\nmaintaining a separate copy of the workflow.\n\nIf the harness only supports skills (T3 Code), use a skill. If it supports\nneither (Conductor), put the `/coasts` workflow directly in the project\ninstructions file.\n", "harnesses/CLAUDE_CODE.md": "# Claude Code\n\n## Quick setup\n\nRequires the [Coast CLI](../GETTING_STARTED.md). Copy this prompt into your\nagent's chat to set up Coasts automatically:\n\n```prompt-copy\nclaude_code_setup_prompt.txt\n```\n\nYou can also get the skill content from the CLI: `coast skills-prompt`.\n\nAfter setup, **start a new Claude Code session** — skills and `CLAUDE.md` changes\nare loaded at session start.\n\n---\n\n[Claude Code](https://docs.anthropic.com/en/docs/claude-code/overview) creates\nworktrees inside the project at `.claude/worktrees/`. Because that directory\nlives inside the repo, Coasts can discover and assign Claude Code worktrees\nwithout any external bind mount.\n\nClaude Code is also the harness here with the clearest split between three\nlayers for Coasts:\n\n- `CLAUDE.md` for short, always-on rules for working with Coasts\n- `.claude/skills/coasts/SKILL.md` for the reusable `/coasts` workflow\n- `.claude/commands/coasts.md` only when you want a command file as an extra\n entrypoint\n\n## Setup\n\nAdd `.claude/worktrees` to `worktree_dir`:\n\n```toml\n[coast]\nname = \"my-app\"\nworktree_dir = [\".worktrees\", \".claude/worktrees\"]\n```\n\nBecause `.claude/worktrees` is project-relative, no external bind mount is\nneeded.\n\n## Where Coasts guidance goes\n\n### `CLAUDE.md`\n\nPut the rules for Coasts that should apply on every task here. 
Keep this short and\noperational:\n\n- run `coast lookup` before the first runtime command in a session\n- use `coast exec` for tests, builds, and service commands\n- use `coast ps` and `coast logs` for runtime feedback\n- ask before creating or reassigning a Coast when no match exists\n\n### `.claude/skills/coasts/SKILL.md`\n\nPut the reusable `/coasts` workflow here. This is the right home for a flow\nthat:\n\n1. runs `coast lookup` and reuses the matching Coast\n2. falls back to `coast ls` when there is no match\n3. offers `coast run`, `coast assign`, `coast unassign`, `coast checkout`, and\n `coast ui`\n4. uses the Coast CLI directly as the contract instead of wrapping it\n\nIf this repo also uses Codex, T3 Code, or Cursor, see\n[Multiple Harnesses](MULTIPLE_HARNESSES.md) and keep the canonical skill in\n`.agents/skills/coasts/`, then expose it to Claude Code.\n\n### `.claude/commands/coasts.md`\n\nClaude Code also supports project command files. For docs about Coasts, treat\nthis as optional:\n\n- use it only when you specifically want a command file\n- one simple option is to have the command reuse the same skill\n- if you give the command its own separate instructions, you are taking on a\n second copy of the workflow to maintain\n\n## Example layout\n\n### Claude Code only\n\n```text\nCLAUDE.md\n.claude/worktrees/\n.claude/skills/coasts/SKILL.md\n```\n\nIf this repo also uses Codex, T3 Code, or Cursor, use the shared pattern in\n[Multiple Harnesses](MULTIPLE_HARNESSES.md) instead of duplicating it here,\nbecause duplicated provider-specific guidance gets harder to keep in sync every\ntime you add another harness.\n\n## What Coasts does\n\n- **Run** — `coast run ` creates a new Coast instance from the latest build. Use `coast run -w ` to create and assign a Claude Code worktree in one step. 
See [Run](../concepts_and_terminology/RUN.md).\n- **Discovery** — Coasts reads `.claude/worktrees` like any other local worktree\n directory.\n- **Naming** — Claude Code worktrees follow the same local worktree naming\n behavior as other in-repo worktrees in the Coasts UI and CLI.\n- **Assign** — `coast assign` can switch `/workspace` to a Claude Code worktree\n without any external bind-mount indirection.\n- **Gitignored sync** — Works normally because the worktrees live inside the\n repository tree.\n- **Orphan detection** — If Claude Code removes a worktree, Coasts can detect\n the missing gitdir and unassign it when needed.\n\n## Example\n\n```toml\n[coast]\nname = \"my-app\"\ncompose = \"./docker-compose.yml\"\nworktree_dir = [\".worktrees\", \".claude/worktrees\", \"~/.codex/worktrees\"]\nprimary_port = \"web\"\n\n[ports]\nweb = 3000\napi = 8080\n\n[assign]\ndefault = \"none\"\n[assign.services]\nweb = \"hot\"\napi = \"hot\"\n```\n\n- `.claude/worktrees/` — Claude Code worktrees\n- `~/.codex/worktrees/` — Codex worktrees if you also use Codex in this repo\n\n## Limitations\n\n- If you duplicate the same `/coasts` workflow across `CLAUDE.md`,\n `.claude/skills`, and `.claude/commands`, those copies will drift. Keep\n `CLAUDE.md` short and keep the reusable workflow in one skill.\n- If you want one repo to work cleanly in multiple harnesses, prefer the shared\n pattern in [Multiple Harnesses](MULTIPLE_HARNESSES.md).\n", "harnesses/CODEX.md": "# Codex\n\n## Quick setup\n\nRequires the [Coast CLI](../GETTING_STARTED.md). Copy this prompt into your\nagent's chat to set up Coasts automatically:\n\n```prompt-copy\ncodex_setup_prompt.txt\n```\n\nYou can also get the skill content from the CLI: `coast skills-prompt`.\n\nAfter setup, **quit and reopen Codex** for the new skill and `AGENTS.md` to take\neffect.\n\n---\n\n[Codex](https://developers.openai.com/codex/app/worktrees/) creates worktrees at `$CODEX_HOME/worktrees` (typically `~/.codex/worktrees`). 
Each worktree lives under an opaque hash directory like `~/.codex/worktrees/a0db/project-name`, starts on a detached HEAD, and is cleaned up automatically based on Codex's retention policy.\n\nFrom the [Codex docs](https://developers.openai.com/codex/app/worktrees/):\n\n> Can I control where worktrees are created?\n> Not today. Codex creates worktrees under `$CODEX_HOME/worktrees` so it can manage them consistently.\n\nBecause these worktrees live outside the project root, Coasts needs explicit\nconfiguration to discover and mount them.\n\n## Setup\n\nAdd `~/.codex/worktrees` to `worktree_dir`:\n\n```toml\n[coast]\nname = \"my-app\"\nworktree_dir = [\".worktrees\", \"~/.codex/worktrees\"]\n```\n\nCoasts expands `~` at runtime and treats any path starting with `~/` or `/` as\nexternal. See [Worktree Directories](../coastfiles/WORKTREE_DIR.md) for\ndetails.\n\nAfter changing `worktree_dir`, existing instances must be **recreated** for the bind mount to take effect:\n\n```bash\ncoast rm my-instance\ncoast build\ncoast run my-instance\n```\n\nThe worktree listing updates immediately (Coasts reads the new Coastfile), but\nassigning to a Codex worktree requires the bind mount inside the container.\n\n## Where Coasts guidance goes\n\nUse Codex's project instruction file and shared skill layout for working with\nCoasts:\n\n- put the short Coast Runtime rules in `AGENTS.md`\n- put the reusable `/coasts` workflow in `.agents/skills/coasts/SKILL.md`\n- Codex surfaces that skill as the `/coasts` command\n- if you use Codex-specific metadata, keep it beside the skill in\n `.agents/skills/coasts/agents/openai.yaml`\n- do not create a separate project command file just for docs about Coasts; the\n skill is the reusable surface\n- if this repo also uses Cursor or Claude Code, keep the canonical skill in\n `.agents/skills/` and expose it from there. 
See\n [Multiple Harnesses](MULTIPLE_HARNESSES.md) and\n [Skills for Host Agents](../SKILLS_FOR_HOST_AGENTS.md).\n\nFor example, a minimal `.agents/skills/coasts/agents/openai.yaml` could look\nlike this:\n\n```yaml\ninterface:\n display_name: \"Coasts\"\n short_description: \"Inspect, assign, and open Coasts for this repo\"\n default_prompt: \"Use this skill when the user wants help finding, assigning, or opening a Coast.\"\n\npolicy:\n allow_implicit_invocation: false\n```\n\nThat keeps the skill visible in Codex with a nicer label and makes `/coasts` an\nexplicit command. Only add `dependencies.tools` if the skill also needs MCP\nservers or other OpenAI-managed tool wiring.\n\n## What Coasts does\n\n- **Run** -- `coast run ` creates a new Coast instance from the latest build. Use `coast run -w ` to create and assign a Codex worktree in one step. See [Run](../concepts_and_terminology/RUN.md).\n- **Bind mount** -- At container creation, Coasts mounts\n `~/.codex/worktrees` into the container at `/host-external-wt/{index}`.\n- **Discovery** -- `git worktree list --porcelain` is repo-scoped, so only Codex worktrees belonging to the current project appear, even though the directory contains worktrees for many projects.\n- **Naming** -- Detached HEAD worktrees show as their relative path within the external dir (`a0db/my-app`, `eca7/my-app`). Branch-based worktrees show the branch name.\n- **Assign** -- `coast assign` remounts `/workspace` from the external bind mount path.\n- **Gitignored sync** -- Runs on the host filesystem with absolute paths, works without the bind mount.\n- **Orphan detection** -- The git watcher scans external directories\n recursively, filtering by `.git` gitdir pointers. 
If Codex deletes a\n worktree, Coasts auto-unassigns the instance.\n\n## Example\n\n```toml\n[coast]\nname = \"my-app\"\ncompose = \"./docker-compose.yml\"\nworktree_dir = [\".worktrees\", \".claude/worktrees\", \"~/.codex/worktrees\"]\nprimary_port = \"web\"\n\n[ports]\nweb = 3000\napi = 8080\n\n[assign]\ndefault = \"none\"\n[assign.services]\nweb = \"hot\"\napi = \"hot\"\n```\n\n- `.claude/worktrees/` -- Claude Code (local, no special handling)\n- `~/.codex/worktrees/` -- Codex (external, bind-mounted)\n\n## Limitations\n\n- Codex may clean up worktrees at any time. The orphan detection in Coasts\n handles this gracefully.\n", "harnesses/CONDUCTOR.md": "# Conductor\n\n## Quick setup\n\nRequires the [Coast CLI](../GETTING_STARTED.md). Copy this prompt into your\nagent's chat to set up Coasts automatically:\n\n```prompt-copy\nconductor_setup_prompt.txt\n```\n\nYou can also get the skill content from the CLI: `coast skills-prompt`.\n\n> **Important:** Conductor runs each session in an isolated git worktree. The\n> setup prompt creates files that only exist in the current workspace — commit\n> and merge them into your main branch or they won't be available in new\n> sessions.\n\nAfter setup, **fully close and reopen Conductor** for changes to take effect. If\nthe `/coasts` command does not appear, close and reopen again.\n\n## Setup\n\nAdd `~/conductor/workspaces/` to `worktree_dir`. Unlike Codex (which stores all projects under one flat directory), Conductor nests worktrees under a per-project subdirectory, so the path must include the project name. In the example below, `my-app` must match the actual folder name under `~/conductor/workspaces/` for your repo.\n\n```toml\n[coast]\nname = \"my-app\"\nworktree_dir = [\".worktrees\", \"~/conductor/workspaces/my-app\"]\n```\n\nConductor allows you to configure the workspaces path per-repository, so the default `~/conductor/workspaces` may not match your setup. 
Check your Conductor repository settings to find the actual path and adjust accordingly — the principle is the same regardless of where the directory lives.\n\nCoasts expands `~` at runtime and treats any path starting with `~/` or `/` as\nexternal. See [Worktree Directories](../coastfiles/WORKTREE_DIR.md) for\ndetails.\n\nAfter changing `worktree_dir`, existing instances must be **recreated** for the bind mount to take effect:\n\n```bash\ncoast rm my-instance\ncoast build\ncoast run my-instance\n```\n\nThe worktree listing updates immediately (Coasts reads the new Coastfile), but\nassigning to a Conductor worktree requires the bind mount inside the container.\n\n## Where Coasts guidance goes\n\nTreat Conductor as its own harness for working with Coasts:\n\n- put the short Coast Runtime rules in `CLAUDE.md`\n- use Conductor Repository Settings scripts for setup or run behavior that is\n actually Conductor-specific\n- do not assume full Claude Code project command or project skill behavior here\n- if you add a command and it does not appear, fully close and reopen\n Conductor before testing again\n- if this repo also uses other harnesses, see\n [Multiple Harnesses](MULTIPLE_HARNESSES.md) and\n [Skills for Host Agents](../SKILLS_FOR_HOST_AGENTS.md) for ways to keep the\n shared `/coasts` workflow in one place\n\n## What Coasts does\n\n- **Run** — `coast run ` creates a new Coast instance from the latest build. Use `coast run -w ` to create and assign a Conductor worktree in one step. See [Run](../concepts_and_terminology/RUN.md).\n- **Bind mount** — At container creation, Coasts mounts\n `~/conductor/workspaces/` into the container at\n `/host-external-wt/{index}`.\n- **Discovery** — `git worktree list --porcelain` is repo-scoped, so only worktrees belonging to the current project appear.\n- **Naming** — Conductor worktrees use named branches, so they appear by branch\n name in the Coasts UI and CLI (e.g., `scroll-to-bottom-btn`). 
A branch can\n only be checked out in one Conductor workspace at a time.\n- **Assign** — `coast assign` remounts `/workspace` from the external bind mount path.\n- **Gitignored sync** — Runs on the host filesystem with absolute paths, works without the bind mount.\n- **Orphan detection** — The git watcher scans external directories\n recursively, filtering by `.git` gitdir pointers. If Conductor archives or\n deletes a workspace, Coasts auto-unassigns the instance.\n\n## Example\n\n```toml\n[coast]\nname = \"my-app\"\ncompose = \"./docker-compose.yml\"\nworktree_dir = [\"~/conductor/workspaces/my-app\"]\nprimary_port = \"web\"\n\n[ports]\nweb = 3000\napi = 8080\n\n[assign]\ndefault = \"none\"\n[assign.services]\nweb = \"hot\"\napi = \"hot\"\n```\n\n- `~/conductor/workspaces/my-app/` — Conductor (external, bind-mounted; replace `my-app` with your repo folder name)\n\n## Conductor Env Vars\n\n- Avoid relying on Conductor-specific environment variables (e.g.,\n `CONDUCTOR_PORT`, `CONDUCTOR_WORKSPACE_PATH`) for runtime configuration\n inside Coasts. Coasts manages ports, workspace paths, and service discovery\n independently — use Coastfile `[ports]` and `coast exec` instead.", "harnesses/CURSOR.md": "# Cursor\n\n## Quick setup\n\nRequires the [Coast CLI](../GETTING_STARTED.md). 
Copy this prompt into your\nagent's chat to set up Coasts automatically:\n\n```prompt-copy\ncursor_setup_prompt.txt\n```\n\nYou can also get the skill content from the CLI: `coast skills-prompt`.\n\nAfter setup, **restart Cursor** for the skill and rules changes to take effect.\n\n---\n\n[Cursor](https://cursor.com/docs/agent/overview) can work directly in your\ncurrent checkout, and its Parallel Agents feature can also create git\nworktrees under `~/.cursor/worktrees/<project>/`.\n\nFor docs about Coasts, that means there are two setup cases:\n\n- if you are just using Cursor in the current checkout, no Cursor-specific\n `worktree_dir` entry is required\n- if you use Cursor Parallel Agents, add the Cursor worktree directory to\n `worktree_dir` so Coasts can discover and assign those worktrees\n\n## Setup\n\n### Current checkout only\n\nIf Cursor is just editing the checkout you already opened, Coasts does not need\nany special Cursor-specific worktree path. Coasts will treat that checkout like\nany other local repository root.\n\n### Cursor Parallel Agents\n\nIf you use Parallel Agents, add `~/.cursor/worktrees/<project>` to\n`worktree_dir`:\n\n```toml\n[coast]\nname = \"my-app\"\nworktree_dir = [\".worktrees\", \"~/.cursor/worktrees/my-app\"]\n```\n\nCursor stores each agent worktree beneath that per-project directory. 
Coasts\nexpands `~` at runtime and treats the path as external, so existing instances\nmust be recreated for the bind mount to take effect:\n\n```bash\ncoast rm my-instance\ncoast build\ncoast run my-instance\n```\n\nThe worktree listing updates immediately after the Coastfile change, but\nassigning to a Cursor Parallel Agent worktree requires the external bind mount\ninside the container.\n\n## Where Coasts guidance goes\n\n### `AGENTS.md` or `.cursor/rules/coast.md`\n\nPut the short, always-on Coast Runtime rules here:\n\n- use `AGENTS.md` if you want the most portable project instructions\n- use `.cursor/rules/coast.md` if you want Cursor-native project rules and\n settings UI support\n- do not duplicate the same Coast Runtime block in both unless you have a clear\n reason\n\n### `.cursor/skills/coasts/SKILL.md` or shared `.agents/skills/coasts/SKILL.md`\n\nPut the reusable `/coasts` workflow here:\n\n- for a Cursor-only repo, `.cursor/skills/coasts/SKILL.md` is a natural home\n- for a multi-harness repo, keep the canonical skill in\n `.agents/skills/coasts/SKILL.md`; Cursor can load that directly\n- the skill should own the real `/coasts` workflow: `coast lookup`,\n `coast ls`, `coast run`, `coast assign`, `coast unassign`,\n `coast checkout`, and `coast ui`\n\n### `.cursor/commands/coasts.md`\n\nCursor also supports project commands. 
For docs about Coasts, treat commands as\noptional:\n\n- add a command only when you want an explicit `/coasts` entrypoint\n- one simple option is to have the command reuse the same skill\n- if you give the command its own separate instructions, you are taking on a\n second copy of the workflow to maintain\n\n### `.cursor/worktrees.json`\n\nUse `.cursor/worktrees.json` for Cursor's own worktree bootstrap, not for Coasts\npolicy:\n\n- install dependencies\n- copy or symlink `.env` files\n- run database migrations or other one-time bootstrap steps\n\nDo not move the Coast Runtime rules or Coast CLI workflow into\n`.cursor/worktrees.json`.\n\n## Example layout\n\n### Cursor only\n\n```text\nAGENTS.md\n.cursor/skills/coasts/SKILL.md\n.cursor/commands/coasts.md # optional\n.cursor/rules/coast.md # optional alternative to AGENTS.md\n.cursor/worktrees.json # optional, for Parallel Agents bootstrap\n```\n\n### Cursor plus other harnesses\n\n```text\nAGENTS.md\nCLAUDE.md\n.agents/skills/coasts/SKILL.md\n.agents/skills/coasts/agents/openai.yaml\n.claude/skills/coasts -> ../../.agents/skills/coasts\n.cursor/commands/coasts.md # optional\n```\n\n## What Coasts does\n\n- **Run** — `coast run ` creates a new Coast instance from the latest build. Use `coast run -w ` to create and assign a Cursor worktree in one step. 
See [Run](../concepts_and_terminology/RUN.md).\n- **Current checkout** — No special Cursor handling is required when Cursor is\n working directly in the repo you opened.\n- **Bind mount** — For Parallel Agents, Coasts mounts\n `~/.cursor/worktrees/` into the container at\n `/host-external-wt/{index}`.\n- **Discovery** — `git worktree list --porcelain` remains repo-scoped, so Coasts\n only shows Cursor worktrees that belong to the current project.\n- **Naming** — Cursor Parallel Agent worktrees appear by their branch names in\n Coasts' CLI and UI.\n- **Assign** — `coast assign` remounts `/workspace` from the external bind\n mount path when a Cursor worktree is selected.\n- **Gitignored sync** — Continues to work on the host filesystem with absolute\n paths.\n- **Orphan detection** — If Cursor cleans up old worktrees, Coasts can detect\n the missing gitdir and unassign them when needed.\n\n## Example\n\n```toml\n[coast]\nname = \"my-app\"\ncompose = \"./docker-compose.yml\"\nworktree_dir = [\".worktrees\", \".claude/worktrees\", \"~/.codex/worktrees\", \"~/.cursor/worktrees/my-app\"]\nprimary_port = \"web\"\n\n[ports]\nweb = 3000\napi = 8080\n\n[assign]\ndefault = \"none\"\n[assign.services]\nweb = \"hot\"\napi = \"hot\"\n```\n\n- `.claude/worktrees/` — Claude Code worktrees\n- `~/.codex/worktrees/` — Codex worktrees\n- `~/.cursor/worktrees/my-app/` — Cursor Parallel Agent worktrees\n\n## Limitations\n\n- If you are not using Cursor Parallel Agents, do not add\n `~/.cursor/worktrees/` just because you happen to be editing in\n Cursor.\n- Keep the Coast Runtime rules in one always-on place: `AGENTS.md` or\n `.cursor/rules/coast.md`. Duplicating both invites drift.\n- Keep the reusable `/coasts` workflow in a skill. 
`.cursor/worktrees.json` is\n for Cursor bootstrap, not Coasts policy.\n- If one repo is shared across Cursor, Codex, Claude Code, or T3 Code, prefer\n the shared layout in [Multiple Harnesses](MULTIPLE_HARNESSES.md).\n", "harnesses/MULTIPLE_HARNESSES.md": "# Multiple Harnesses\n\nIf one repository is used from more than one harness, one way to consolidate\nthe Coasts setup is to keep the shared `/coasts` workflow in one place and keep\nthe harness-specific always-on rules in the files for each harness.\n\n## Recommended layout\n\n```text\nAGENTS.md\nCLAUDE.md\n.cursor/rules/coast.md # optional Cursor-native always-on rules\n.agents/skills/coasts/SKILL.md\n.agents/skills/coasts/agents/openai.yaml\n.claude/skills/coasts -> ../../.agents/skills/coasts\n.cursor/commands/coasts.md # optional, thin, harness-specific\n.claude/commands/coasts.md # optional, thin, harness-specific\n```\n\nUse this layout like this:\n\n- `AGENTS.md` — short, always-on rules for working with Coasts in Codex and T3\n Code\n- `.cursor/rules/coast.md` — optional Cursor-native always-on rules\n- `CLAUDE.md` — short, always-on rules for working with Coasts in Claude Code\n and Conductor\n- `.agents/skills/coasts/SKILL.md` — canonical reusable `/coasts` workflow\n- `.agents/skills/coasts/agents/openai.yaml` — optional Codex/OpenAI metadata\n- `.claude/skills/coasts` — Claude-facing mirror or symlink when Claude Code\n also needs the same skill\n- `.cursor/commands/coasts.md` — optional Cursor command file; one simple\n option is to have it reuse the same skill\n- `.claude/commands/coasts.md` — optional explicit command file; one simple\n option is to have it reuse the same skill\n\n## Step-by-step\n\n1. 
Put the Coast Runtime rules in the always-on instruction files.\n - `AGENTS.md`, `CLAUDE.md`, or `.cursor/rules/coast.md` should answer the\n \"every task\" rules: run `coast lookup` first, use `coast exec`, read logs\n with `coast logs`, ask before `coast assign` or `coast run` when there is\n no match.\n2. Create one canonical skill for Coasts.\n - Put the reusable `/coasts` workflow in `.agents/skills/coasts/SKILL.md`.\n - Use the Coast CLI directly inside that skill: `coast lookup`,\n `coast ls`, `coast run`, `coast assign`, `coast unassign`,\n `coast checkout`, and `coast ui`.\n3. Expose that skill only where a harness needs a different path.\n - Codex, T3 Code, and Cursor can all use `.agents/skills/` directly.\n - Claude Code needs `.claude/skills/`, so mirror or symlink the canonical\n skill into that location.\n4. Add a command file only if you want an explicit `/coasts` entrypoint.\n - If you create `.claude/commands/coasts.md` or\n `.cursor/commands/coasts.md`, one simple option is to have the command\n reuse the same skill.\n - If you give the command its own separate instructions, you are taking on a\n second copy of the workflow to maintain.\n5. Keep Conductor-specific setup in Conductor, not in the skill.\n - Use Conductor Repository Settings scripts for bootstrap or run behavior\n that belongs to Conductor itself.\n - Keep Coasts policy and use of the `coast` CLI in `CLAUDE.md` and the\n shared skill.\n\n## Concrete `/coasts` example\n\nA good shared `coasts` skill should do three jobs:\n\n1. `Use Existing Coast`\n - run `coast lookup`\n - if a match exists, use `coast exec`, `coast ps`, and `coast logs`\n2. `Manage Assignment`\n - run `coast ls`\n - offer `coast run`, `coast assign`, `coast unassign`, or\n `coast checkout`\n - ask before reusing or disrupting an existing slot\n3. `Open UI`\n - run `coast ui`\n\nThat is the right place for the `/coasts` workflow. 
The always-on files should\nonly hold the short rules that must apply even when the skill is never invoked.\n\n## Symlink pattern\n\nIf you want Claude Code to reuse the same skill as Codex, T3 Code, or Cursor,\none option is a symlink:\n\n```bash\nmkdir -p .claude/skills\nln -s ../../.agents/skills/coasts .claude/skills/coasts\n```\n\nA checked-in mirror is also fine if your team prefers not to use symlinks. The\nmain goal is just to avoid unnecessary drift between copies.\n\n## Harness-specific cautions\n\n- Claude Code: project skills and optional project commands are both valid, but\n keep the logic in the skill.\n- Cursor: use `AGENTS.md` or `.cursor/rules/coast.md` for the short Coast\n Runtime rules, use a skill for the reusable workflow, and keep\n `.cursor/commands` optional.\n- Conductor: treat it as `CLAUDE.md` plus Conductor scripts and settings first.\n If you add a command and it does not appear, fully close and reopen the app\n before checking again.\n- T3 Code: this is the thinnest harness surface here. Use the Codex-style\n `AGENTS.md` plus `.agents/skills` pattern, and do not invent a separate\n T3-specific command layout for docs about Coasts.\n- Codex: keep `AGENTS.md` short and put the reusable workflow in\n `.agents/skills`.\n", + "harnesses/SHEP.md": "# Shep\n\n## Quick setup\n\nRequires the [Coast CLI](../GETTING_STARTED.md). Copy this prompt into your\nagent's chat to set up Coasts automatically:\n\n```prompt-copy\nshep_setup_prompt.txt\n```\n\nYou can also get the skill content from the CLI: `coast skills-prompt`.\n\nAfter setup, **quit and reopen your editor** for the new skill and project\ninstructions to take effect.\n\n---\n\n[Shep](https://shep-ai.github.io/cli/) creates worktrees at `~/.shep/repos/{hash}/wt/{branch-slug}`. The hash is the first 16 hex characters of the SHA-256 of the repository's absolute path, so it is deterministic per-repo but opaque. 
All worktrees for a given repo share the same hash and are differentiated by the `wt/{branch-slug}` subdirectory.\n\nFrom the Shep CLI, `shep feat show ` prints the worktree path, or\n`ls ~/.shep/repos` lists the per-repo hash directories.\n\nBecause the hash varies per repo, Coasts uses a **glob pattern** to discover\nshep worktrees without requiring the user to hard-code the hash.\n\n## Setup\n\nAdd `~/.shep/repos/*/wt` to `worktree_dir`:\n\n```toml\n[coast]\nname = \"my-app\"\nworktree_dir = [\".worktrees\", \"~/.shep/repos/*/wt\"]\n```\n\nThe `*` matches the per-repo hash directory. At runtime Coasts expands the glob,\nfinds the matching directory (e.g. `~/.shep/repos/a21f0cda9ab9d456/wt`), and\nbind-mounts it into the container. See\n[Worktree Directories](../coastfiles/WORKTREE_DIR.md) for full details on glob\npatterns.\n\nAfter changing `worktree_dir`, existing instances must be **recreated** for the bind mount to take effect:\n\n```bash\ncoast rm my-instance\ncoast build\ncoast run my-instance\n```\n\nThe worktree listing updates immediately (Coasts reads the new Coastfile), but\nassigning to a Shep worktree requires the bind mount inside the container.\n\n## Where Coasts guidance goes\n\nShep wraps Claude Code under the hood, so follow the Claude Code conventions:\n\n- put the short Coast Runtime rules in `CLAUDE.md`\n- put the reusable `/coasts` workflow in `.claude/skills/coasts/SKILL.md` or\n the shared `.agents/skills/coasts/SKILL.md`\n- if this repo also uses other harnesses, see\n [Multiple Harnesses](MULTIPLE_HARNESSES.md) and\n [Skills for Host Agents](../SKILLS_FOR_HOST_AGENTS.md)\n\n## What Coasts does\n\n- **Run** -- `coast run ` creates a new Coast instance from the latest build. Use `coast run -w ` to create and assign a Shep worktree in one step. 
See [Run](../concepts_and_terminology/RUN.md).\n- **Bind mount** -- At container creation, Coasts resolves the glob\n `~/.shep/repos/*/wt` and mounts each matching directory into the container at\n `/host-external-wt/{index}`.\n- **Discovery** -- `git worktree list --porcelain` is repo-scoped, so only\n worktrees belonging to the current project appear.\n- **Naming** -- Shep worktrees use named branches, so they appear by branch\n name in the Coasts UI and CLI (e.g., `feat-green-background`).\n- **Assign** -- `coast assign` remounts `/workspace` from the external bind mount path.\n- **Gitignored sync** -- Runs on the host filesystem with absolute paths, works without the bind mount.\n- **Orphan detection** -- The git watcher scans external directories\n recursively, filtering by `.git` gitdir pointers. If Shep deletes a\n worktree, Coasts auto-unassigns the instance.\n\n## Example\n\n```toml\n[coast]\nname = \"my-app\"\ncompose = \"./docker-compose.yml\"\nworktree_dir = [\".worktrees\", \"~/.shep/repos/*/wt\"]\nprimary_port = \"web\"\n\n[ports]\nweb = 3000\napi = 8080\n\n[assign]\ndefault = \"none\"\n[assign.services]\nweb = \"hot\"\napi = \"hot\"\n```\n\n- `~/.shep/repos/*/wt` -- Shep (external, bind-mounted via glob expansion)\n\n## Shep path structure\n\n```\n~/.shep/repos/\n {sha256-of-repo-path-first-16-chars}/\n wt/\n {branch-slug}/ <-- git worktree\n {branch-slug}/\n```\n\nKey points:\n- Same repo = same hash every time (deterministic, not random)\n- Different repos = different hashes\n- Path separators are normalized to `/` before hashing\n- The hash can be found via `shep feat show ` or `ls ~/.shep/repos`\n", "harnesses/T3_CODE.md": "# T3 Code\n\n## Quick setup\n\nRequires the [Coast CLI](../GETTING_STARTED.md). 
Copy this prompt into your\nagent's chat to set up Coasts automatically:\n\n```prompt-copy\nt3_code_setup_prompt.txt\n```\n\nYou can also get the skill content from the CLI: `coast skills-prompt`.\n\nAfter setup, **restart T3 Code** for the skill and rules changes to take effect.\n\n**Note:** T3 Code may not load project-level skills from `.agents/skills/` or\n`.claude/skills/` yet. The setup prompt also places the skill in\n`~/.codex/skills/coasts/` so it is available globally to the Codex provider.\nThe Coast Runtime rules in `AGENTS.md` and `CLAUDE.md` still apply on every\ntask regardless.\n\n---\n\n[T3 Code](https://github.com/pingdotgg/t3code) creates git worktrees at\n`~/.t3/worktrees/<project>/`, checked out on named branches.\n\nT3 Code wraps Codex, so it uses `AGENTS.md` for always-on rules and\n`.agents/skills/coasts/SKILL.md` for the reusable `/coasts` workflow.\n\nBecause these worktrees live outside the project root, Coasts needs explicit\nconfiguration to discover and mount them.\n\n## Setup\n\nAdd `~/.t3/worktrees/<project>` to `worktree_dir`. T3 Code nests worktrees under a per-project subdirectory, so the path must include the project name. In the example below, `my-app` must match the actual folder name under `~/.t3/worktrees/` for your repo.\n\n```toml\n[coast]\nname = \"my-app\"\nworktree_dir = [\".worktrees\", \"~/.t3/worktrees/my-app\"]\n```\n\nCoasts expands `~` at runtime and treats any path starting with `~/` or `/` as\nexternal. 
See [Worktree Directories](../coastfiles/WORKTREE_DIR.md) for\ndetails.\n\nAfter changing `worktree_dir`, existing instances must be **recreated** for the bind mount to take effect:\n\n```bash\ncoast rm my-instance\ncoast build\ncoast run my-instance\n```\n\nThe worktree listing updates immediately (Coasts reads the new Coastfile), but\nassigning to a T3 Code worktree requires the bind mount inside the container.\n\n## Where Coasts guidance goes\n\nUse this layout for T3 Code:\n\n- put the short Coast Runtime rules in `AGENTS.md`\n- put the reusable `/coasts` workflow in `.agents/skills/coasts/SKILL.md`\n- do not add a separate T3-specific project command or slash-command layer for\n Coasts\n- if this repo uses multiple harnesses, see\n [Multiple Harnesses](MULTIPLE_HARNESSES.md) and\n [Skills for Host Agents](../SKILLS_FOR_HOST_AGENTS.md).\n\n## What Coasts does\n\n- **Run** — `coast run ` creates a new Coast instance from the latest build. Use `coast run -w ` to create and assign a T3 Code worktree in one step. See [Run](../concepts_and_terminology/RUN.md).\n- **Bind mount** — At container creation, Coasts mounts\n `~/.t3/worktrees/` into the container at\n `/host-external-wt/{index}`.\n- **Discovery** — `git worktree list --porcelain` is repo-scoped, so only worktrees belonging to the current project appear.\n- **Naming** — T3 Code worktrees use named branches, so they appear by branch\n name in the Coasts UI and CLI.\n- **Assign** — `coast assign` remounts `/workspace` from the external bind mount path.\n- **Gitignored sync** — Runs on the host filesystem with absolute paths, works without the bind mount.\n- **Orphan detection** — The git watcher scans external directories\n recursively, filtering by `.git` gitdir pointers. 
If T3 Code removes a\n workspace, Coasts auto-unassigns the instance.\n\n## Example\n\n```toml\n[coast]\nname = \"my-app\"\ncompose = \"./docker-compose.yml\"\nworktree_dir = [\".worktrees\", \".claude/worktrees\", \"~/.codex/worktrees\", \"~/.t3/worktrees/my-app\"]\nprimary_port = \"web\"\n\n[ports]\nweb = 3000\napi = 8080\n\n[assign]\ndefault = \"none\"\n[assign.services]\nweb = \"hot\"\napi = \"hot\"\n```\n\n- `.claude/worktrees/` — Claude Code (local, no special handling)\n- `~/.codex/worktrees/` — Codex (external, bind-mounted)\n- `~/.t3/worktrees/my-app/` — T3 Code (external, bind-mounted; replace `my-app` with your repo folder name)\n\n## Limitations\n\n- Avoid relying on T3 Code-specific environment variables for runtime\n configuration inside Coasts. Coasts manages ports, workspace paths, and\n service discovery independently — use Coastfile `[ports]` and `coast exec`\n instead.\n", - "harnesses/claude_code_setup_prompt.txt": "You are setting up the Coasts skill for Claude Code in this project. Run all\ncommands from the project root.\n\n## Step 1: Check for Coast CLI\n\nRun this command:\n\n coast --version\n\nIf the `coast` command is not found, stop here and tell the user:\n\n \"The Coast CLI is not installed. 
Install it first: https://coasts.dev/docs/getting-started\"\n\nDo not continue until the CLI is available.\n\n## Step 2: Get the skill content\n\nRun this command:\n\n coast skills-prompt\n\nThe output has two parts:\n\n- **Coast Runtime rules** — everything from the start up to (but not including) the line that begins with `---`\n- **Coasts skill** — everything from the `---` frontmatter block onward (including the `---` lines and everything after them)\n\nSave both parts for the steps below.\n\n## Step 3: Ask the user\n\nAsk the user: should I set this up **globally** (available in every project on this machine) or **for this project only**?\n\n## Step 4: Place files\n\nIf a target file already exists, append the Coast Runtime section rather than overwriting — but check first whether a `# Coast Runtime` section is already present and skip if so.\n\n**Global setup:**\n- Append the Coast Runtime rules to `~/.claude/CLAUDE.md`\n- Write the Coasts skill to `~/.claude/skills/coasts/SKILL.md`\n\n**Project setup:**\n- Append the Coast Runtime rules to `CLAUDE.md` at the project root\n- Write the Coasts skill to `.claude/skills/coasts/SKILL.md`\n\n## Step 5: Confirm\n\nAfter placing files, show the user a summary of what was created and where.\n\nTell the user: **Start a new Claude Code session** for the skill and CLAUDE.md\nchanges to take effect.\n", - "harnesses/codex_setup_prompt.txt": "You are setting up the Coasts skill for OpenAI Codex in this project. Run all\ncommands from the project root.\n\n## Step 1: Check for Coast CLI\n\nRun this command:\n\n coast --version\n\nIf the `coast` command is not found, stop here and tell the user:\n\n \"The Coast CLI is not installed. 
Install it first: https://coasts.dev/docs/getting-started\"\n\nDo not continue until the CLI is available.\n\n## Step 2: Get the skill content\n\nRun this command:\n\n coast skills-prompt\n\nThe output has two parts:\n\n- **Coast Runtime rules** — everything from the start up to (but not including) the line that begins with `---`\n- **Coasts skill** — everything from the `---` frontmatter block onward (including the `---` lines and everything after them)\n\nSave both parts for the steps below.\n\n## Step 3: Place files\n\nCodex setup is always project-level. If a target file already exists, append the Coast Runtime section rather than overwriting — but check first whether a `# Coast Runtime` section is already present and skip if so.\n\n- Append the Coast Runtime rules to `AGENTS.md` at the project root\n- Write the Coasts skill to `.agents/skills/coasts/SKILL.md`\n- Create `.agents/skills/coasts/agents/openai.yaml` with this content:\n\ninterface:\n display_name: \"Coasts\"\n short_description: \"Inspect, assign, and open Coasts for this repo\"\n default_prompt: \"Use this skill when the user wants help finding, assigning, or opening a Coast.\"\n\npolicy:\n allow_implicit_invocation: false\n\n## Step 4: Confirm\n\nAfter placing files, show the user a summary of what was created and where.\n\nTell the user: **Quit and reopen Codex** for the skill and AGENTS.md changes to\ntake effect.\n", - "harnesses/conductor_setup_prompt.txt": "You are setting up the Coasts skill for Conductor in this project. Run all\ncommands from the project root.\n\n## Step 1: Check for Coast CLI\n\nRun this command:\n\n coast --version\n\nIf the `coast` command is not found, stop here and tell the user:\n\n \"The Coast CLI is not installed. 
Install it first: https://coasts.dev/docs/getting-started\"\n\nDo not continue until the CLI is available.\n\n## Step 2: Get the skill content\n\nRun this command:\n\n coast skills-prompt\n\nThe output has two parts:\n\n- **Coast Runtime rules** — everything from the start up to (but not including) the line that begins with `---`\n- **Coasts skill** — everything from the `---` frontmatter block onward (including the `---` lines and everything after them)\n\nSave both parts for the steps below.\n\n## Step 3: Ask the user\n\nAsk the user: which providers do you use in Conductor?\n\n- **Claude** (Anthropic)\n- **Codex** (OpenAI)\n- **Both**\n- **Other** — if the user names a different provider, run\n `coast docs --path SKILLS_FOR_HOST_AGENTS.md` and\n `coast docs --path harnesses/README.md` to determine where that provider\n expects project instructions and skills, then follow the same pattern\n\n## Step 4: Place files\n\nConductor setup is always project-level. If a target file already exists, append\nthe Coast Runtime section rather than overwriting — but check first whether a\n`# Coast Runtime` section is already present and skip if so.\n\n**If the user selected Claude (or both):**\n- Append the Coast Runtime rules to `CLAUDE.md` at the project root\n- Write the Coasts skill to `.claude/skills/coasts/SKILL.md`\n\n**If the user selected Codex (or both):**\n- Append the Coast Runtime rules to `AGENTS.md` at the project root\n- Write the Coasts skill to `.agents/skills/coasts/SKILL.md`\n- Create `.agents/skills/coasts/agents/openai.yaml` with this content:\n\ninterface:\n display_name: \"Coasts\"\n short_description: \"Inspect, assign, and open Coasts for this repo\"\n default_prompt: \"Use this skill when the user wants help finding, assigning, or opening a Coast.\"\n\npolicy:\n allow_implicit_invocation: false\n\n## Step 5: Confirm\n\nAfter placing files, show the user a summary of what was created and where.\n\nTell the user: **Fully close and reopen Conductor** 
for changes to take effect.\nIf the `/coasts` command does not appear immediately, close and reopen again.\n\n## Step 6: Commit the new files\n\nConductor runs each session in an isolated git worktree. Uncommitted files will\nnot carry over to new sessions. Commit the files you just created so they are\navailable in every future workspace:\n\n git add CLAUDE.md .claude/ AGENTS.md .agents/ 2>/dev/null; git status\n\nShow the user which files are staged and ask them to confirm before committing.\nUse a commit message like `[dh] feat: add Coasts skill for Conductor`.\n", - "harnesses/cursor_setup_prompt.txt": "You are setting up the Coasts skill for Cursor in this project. Run all\ncommands from the project root.\n\n## Step 1: Check for Coast CLI\n\nRun this command:\n\n coast --version\n\nIf the `coast` command is not found, stop here and tell the user:\n\n \"The Coast CLI is not installed. Install it first: https://coasts.dev/docs/getting-started\"\n\nDo not continue until the CLI is available.\n\n## Step 2: Get the skill content\n\nRun this command:\n\n coast skills-prompt\n\nThe output has two parts:\n\n- **Coast Runtime rules** — everything from the start up to (but not including) the line that begins with `---`\n- **Coasts skill** — everything from the `---` frontmatter block onward (including the `---` lines and everything after them)\n\nSave both parts for the steps below.\n\n## Step 3: Ask the user\n\nAsk the user: should I set this up **globally** (available in every project on this machine) or **for this project only**?\n\n## Step 4: Place files\n\nIf a target file already exists, append the Coast Runtime section rather than overwriting — but check first whether a `# Coast Runtime` section is already present and skip if so.\n\n**Global setup:**\n- Append the Coast Runtime rules to the user's global Cursor rules\n- Write the Coasts skill to `~/.cursor/skills/coasts/SKILL.md`\n\n**Project setup:**\n- Append the Coast Runtime rules to `AGENTS.md` at the 
project root (or `.cursor/rules/coast.md` if the user prefers Cursor-native rules — ask them)\n- Write the Coasts skill to `.cursor/skills/coasts/SKILL.md`\n\n## Step 5: Confirm\n\nAfter placing files, show the user a summary of what was created and where.\n\nTell the user: **Restart Cursor** for the skill and rules changes to take\neffect.\n", - "harnesses/t3_code_setup_prompt.txt": "You are setting up the Coasts skill for T3 Code in this project. Run all\ncommands from the project root.\n\n## Step 1: Check for Coast CLI\n\nRun this command:\n\n coast --version\n\nIf the `coast` command is not found, stop here and tell the user:\n\n \"The Coast CLI is not installed. Install it first: https://coasts.dev/docs/getting-started\"\n\nDo not continue until the CLI is available.\n\n## Step 2: Get the skill content\n\nRun this command:\n\n coast skills-prompt\n\nThe output has two parts:\n\n- **Coast Runtime rules** — everything from the start up to (but not including) the line that begins with `---`\n- **Coasts skill** — everything from the `---` frontmatter block onward (including the `---` lines and everything after them)\n\nSave both parts for the steps below.\n\n## Step 3: Ask the user\n\nT3 Code supports multiple providers. Ask the user: which providers do you use\nin T3 Code?\n\n- **Codex** (OpenAI)\n- **Claude** (Anthropic)\n- **Both**\n- **Other** — if the user names a different provider, run\n `coast docs --path SKILLS_FOR_HOST_AGENTS.md` and\n `coast docs --path harnesses/README.md` to determine where that provider\n expects project instructions and skills, then follow the same pattern\n\n## Step 4: Place files\n\nT3 Code setup is always project-level. 
If a target file already exists, append\nthe Coast Runtime section rather than overwriting — but check first whether a\n`# Coast Runtime` section is already present and skip if so.\n\n**If the user selected Codex (or both):**\n- Append the Coast Runtime rules to `AGENTS.md` at the project root\n- Write the Coasts skill to `.agents/skills/coasts/SKILL.md` (project-level)\n- Also write the Coasts skill to `~/.codex/skills/coasts/SKILL.md` (global\n fallback — T3 Code may not scan project-level `.agents/skills/` yet)\n- Create `.agents/skills/coasts/agents/openai.yaml` with this content:\n\ninterface:\n display_name: \"Coasts\"\n short_description: \"Inspect, assign, and open Coasts for this repo\"\n default_prompt: \"Use this skill when the user wants help finding, assigning, or opening a Coast.\"\n\npolicy:\n allow_implicit_invocation: false\n\n**If the user selected Claude (or both):**\n- Append the Coast Runtime rules to `CLAUDE.md` at the project root\n- Write the Coasts skill to `.claude/skills/coasts/SKILL.md` (project-level)\n- Also write the Coasts skill to `~/.claude/skills/coasts/SKILL.md` (global\n fallback — T3 Code may not scan project-level `.claude/skills/` reliably)\n\n**Note:** T3 Code may not load project-level skills from `.agents/skills/` or\n`.claude/skills/` yet. The `AGENTS.md` and `CLAUDE.md` rules still apply on\nevery task regardless. The global `~/.codex/skills/coasts/` fallback ensures\nthe Codex provider can find the skill.\n\n## Step 5: Confirm\n\nAfter placing files, show the user a summary of what was created and where.\nExplain that the global `~/.codex/skills/coasts/` copy is a workaround for\nT3 Code not loading project-level skills yet.\n\nTell the user: **Restart T3 Code** for the skill and rules changes to take\neffect.\n", + "harnesses/claude_code_setup_prompt.txt": "You are setting up the Coasts skill for Claude Code in this project. 
Run all\ncommands from the project root.\n\n## Step 1: Check for Coast CLI\n\nRun this command:\n\n coast --version\n\nIf the `coast` command is not found, stop here and tell the user:\n\n \"The Coast CLI is not installed. Install it first: https://coasts.dev/docs/getting-started\"\n\nDo not continue until the CLI is available.\n\n## Step 2: Get the skill content\n\nRun this command:\n\n coast skills-prompt\n\nThe output has two parts:\n\n- **Coast Runtime rules** — everything from the start up to (but not including) the line that begins with `---`\n- **Coasts skill** — everything from the `---` frontmatter block onward (including the `---` lines and everything after them)\n\nSave both parts for the steps below.\n\n## Step 3: Ask the user\n\nAsk the user: should I set this up **globally** (available in every project on this machine) or **for this project only**?\n\n## Step 4: Place files\n\nIf a target file already exists, append the Coast Runtime section rather than overwriting — but check first whether a `# Coast Runtime` section is already present and skip if so.\n\n**Global setup:**\n- Append the Coast Runtime rules to `~/.claude/CLAUDE.md`\n- Write the Coasts skill to `~/.claude/skills/coasts/SKILL.md`\n\n**Project setup:**\n- Append the Coast Runtime rules to `CLAUDE.md` at the project root\n- Write the Coasts skill to `.claude/skills/coasts/SKILL.md`\n\n## Step 5: Update the Coastfile\n\nRead the `Coastfile` at the project root. Look at the `worktree_dir` field in the\n`[coast]` section.\n\nIf `.claude/worktrees` is **not** already listed in `worktree_dir`:\n\n- If `worktree_dir` is a single string, convert it to an array and append\n `.claude/worktrees`. 
For example, `worktree_dir = \".worktrees\"` becomes\n `worktree_dir = [\".worktrees\", \".claude/worktrees\"]`.\n- If `worktree_dir` is already an array, append `.claude/worktrees` to it.\n- If `worktree_dir` is not present at all, add\n `worktree_dir = [\".worktrees\", \".claude/worktrees\"]`.\n\nThis is a relative path (inside the project root), so no container recreation is\nneeded — changes take effect immediately for worktree listing.\n\n## Step 6: Confirm\n\nAfter placing files, show the user a summary of what was created and where.\n\nTell the user: **Start a new Claude Code session** for the skill and CLAUDE.md\nchanges to take effect.\n", + "harnesses/codex_setup_prompt.txt": "You are setting up the Coasts skill for OpenAI Codex in this project. Run all\ncommands from the project root.\n\n## Step 1: Check for Coast CLI\n\nRun this command:\n\n coast --version\n\nIf the `coast` command is not found, stop here and tell the user:\n\n \"The Coast CLI is not installed. Install it first: https://coasts.dev/docs/getting-started\"\n\nDo not continue until the CLI is available.\n\n## Step 2: Get the skill content\n\nRun this command:\n\n coast skills-prompt\n\nThe output has two parts:\n\n- **Coast Runtime rules** — everything from the start up to (but not including) the line that begins with `---`\n- **Coasts skill** — everything from the `---` frontmatter block onward (including the `---` lines and everything after them)\n\nSave both parts for the steps below.\n\n## Step 3: Place files\n\nCodex setup is always project-level. 
If a target file already exists, append the Coast Runtime section rather than overwriting — but check first whether a `# Coast Runtime` section is already present and skip if so.\n\n- Append the Coast Runtime rules to `AGENTS.md` at the project root\n- Write the Coasts skill to `.agents/skills/coasts/SKILL.md`\n- Create `.agents/skills/coasts/agents/openai.yaml` with this content:\n\ninterface:\n display_name: \"Coasts\"\n short_description: \"Inspect, assign, and open Coasts for this repo\"\n default_prompt: \"Use this skill when the user wants help finding, assigning, or opening a Coast.\"\n\npolicy:\n allow_implicit_invocation: false\n\n## Step 4: Update the Coastfile\n\nRead the `Coastfile` at the project root. Look at the `worktree_dir` field in the\n`[coast]` section.\n\nIf `~/.codex/worktrees` is **not** already listed in `worktree_dir`:\n\n- If `worktree_dir` is a single string, convert it to an array and append\n `~/.codex/worktrees`. For example, `worktree_dir = \".worktrees\"` becomes\n `worktree_dir = [\".worktrees\", \"~/.codex/worktrees\"]`.\n- If `worktree_dir` is already an array, append `~/.codex/worktrees` to it.\n- If `worktree_dir` is not present at all, add\n `worktree_dir = [\".worktrees\", \"~/.codex/worktrees\"]`.\n\nThis is an external path, so if a Coast instance is already running for this\nproject it must be recreated with `coast run` for the new bind mount to take\neffect. Tell the user this.\n\n## Step 5: Confirm\n\nAfter placing files, show the user a summary of what was created and where.\n\nTell the user: **Quit and reopen Codex** for the skill and AGENTS.md changes to\ntake effect.\n", + "harnesses/conductor_setup_prompt.txt": "You are setting up the Coasts skill for Conductor in this project. Run all\ncommands from the project root.\n\n## Step 1: Check for Coast CLI\n\nRun this command:\n\n coast --version\n\nIf the `coast` command is not found, stop here and tell the user:\n\n \"The Coast CLI is not installed. 
Install it first: https://coasts.dev/docs/getting-started\"\n\nDo not continue until the CLI is available.\n\n## Step 2: Get the skill content\n\nRun this command:\n\n coast skills-prompt\n\nThe output has two parts:\n\n- **Coast Runtime rules** — everything from the start up to (but not including) the line that begins with `---`\n- **Coasts skill** — everything from the `---` frontmatter block onward (including the `---` lines and everything after them)\n\nSave both parts for the steps below.\n\n## Step 3: Ask the user\n\nAsk the user: which providers do you use in Conductor?\n\n- **Claude** (Anthropic)\n- **Codex** (OpenAI)\n- **Both**\n- **Other** — if the user names a different provider, run\n `coast docs --path SKILLS_FOR_HOST_AGENTS.md` and\n `coast docs --path harnesses/README.md` to determine where that provider\n expects project instructions and skills, then follow the same pattern\n\n## Step 4: Place files\n\nConductor setup is always project-level. If a target file already exists, append\nthe Coast Runtime section rather than overwriting — but check first whether a\n`# Coast Runtime` section is already present and skip if so.\n\n**If the user selected Claude (or both):**\n- Append the Coast Runtime rules to `CLAUDE.md` at the project root\n- Write the Coasts skill to `.claude/skills/coasts/SKILL.md`\n\n**If the user selected Codex (or both):**\n- Append the Coast Runtime rules to `AGENTS.md` at the project root\n- Write the Coasts skill to `.agents/skills/coasts/SKILL.md`\n- Create `.agents/skills/coasts/agents/openai.yaml` with this content:\n\ninterface:\n display_name: \"Coasts\"\n short_description: \"Inspect, assign, and open Coasts for this repo\"\n default_prompt: \"Use this skill when the user wants help finding, assigning, or opening a Coast.\"\n\npolicy:\n allow_implicit_invocation: false\n\n## Step 5: Confirm\n\nAfter placing files, show the user a summary of what was created and where.\n\nTell the user: **Fully close and reopen Conductor** 
for changes to take effect.\nIf the `/coasts` command does not appear immediately, close and reopen again.\n\n## Step 6: Update the Coastfile\n\nRead the `Coastfile` at the project root. Look at the `worktree_dir` field in the\n`[coast]` section. Also read the `name` field — you will need it to construct the\nworktree path.\n\nThe Conductor worktree directory is `~/conductor/workspaces/<project-name>`, where\n`<project-name>` is the value of the `name` field in the Coastfile.\n\nIf that path is **not** already listed in `worktree_dir`:\n\n- If `worktree_dir` is a single string, convert it to an array and append the\n Conductor path. For example, `worktree_dir = \".worktrees\"` becomes\n `worktree_dir = [\".worktrees\", \"~/conductor/workspaces/my-app\"]`.\n- If `worktree_dir` is already an array, append the Conductor path to it.\n- If `worktree_dir` is not present at all, add\n `worktree_dir = [\".worktrees\", \"~/conductor/workspaces/<project-name>\"]` using the\n actual project name.\n\nThis is an external path, so if a Coast instance is already running for this\nproject it must be recreated with `coast run` for the new bind mount to take\neffect. Tell the user this.\n\n## Step 7: Commit the new files\n\nConductor runs each session in an isolated git worktree. Uncommitted files will\nnot carry over to new sessions. Commit the files you just created so they are\navailable in every future workspace:\n\n git add CLAUDE.md .claude/ AGENTS.md .agents/ 2>/dev/null; git status\n\nShow the user which files are staged and ask them to confirm before committing.\nUse a commit message like `[dh] feat: add Coasts skill for Conductor`.\n", + "harnesses/cursor_setup_prompt.txt": "You are setting up the Coasts skill for Cursor in this project. Run all\ncommands from the project root.\n\n## Step 1: Check for Coast CLI\n\nRun this command:\n\n coast --version\n\nIf the `coast` command is not found, stop here and tell the user:\n\n \"The Coast CLI is not installed. 
Install it first: https://coasts.dev/docs/getting-started\"\n\nDo not continue until the CLI is available.\n\n## Step 2: Get the skill content\n\nRun this command:\n\n coast skills-prompt\n\nThe output has two parts:\n\n- **Coast Runtime rules** — everything from the start up to (but not including) the line that begins with `---`\n- **Coasts skill** — everything from the `---` frontmatter block onward (including the `---` lines and everything after them)\n\nSave both parts for the steps below.\n\n## Step 3: Ask the user\n\nAsk the user: should I set this up **globally** (available in every project on this machine) or **for this project only**?\n\n## Step 4: Place files\n\nIf a target file already exists, append the Coast Runtime section rather than overwriting — but check first whether a `# Coast Runtime` section is already present and skip if so.\n\n**Global setup:**\n- Append the Coast Runtime rules to the user's global Cursor rules\n- Write the Coasts skill to `~/.cursor/skills/coasts/SKILL.md`\n\n**Project setup:**\n- Append the Coast Runtime rules to `AGENTS.md` at the project root (or `.cursor/rules/coast.md` if the user prefers Cursor-native rules — ask them)\n- Write the Coasts skill to `.cursor/skills/coasts/SKILL.md`\n\n## Step 5: Update the Coastfile\n\nRead the `Coastfile` at the project root. Look at the `worktree_dir` field in the\n`[coast]` section. Also read the `name` field — you will need it to construct the\nworktree path.\n\nThe Cursor worktree directory is `~/.cursor/worktrees/<project-name>`, where `<project-name>` is\nthe value of the `name` field in the Coastfile.\n\nIf that path is **not** already listed in `worktree_dir`:\n\n- If `worktree_dir` is a single string, convert it to an array and append the\n Cursor path. 
For example, `worktree_dir = \".worktrees\"` becomes\n `worktree_dir = [\".worktrees\", \"~/.cursor/worktrees/my-app\"]`.\n- If `worktree_dir` is already an array, append the Cursor path to it.\n- If `worktree_dir` is not present at all, add\n `worktree_dir = [\".worktrees\", \"~/.cursor/worktrees/<project-name>\"]` using the\n actual project name.\n\nThis is an external path, so if a Coast instance is already running for this\nproject it must be recreated with `coast run` for the new bind mount to take\neffect. Tell the user this.\n\n## Step 6: Confirm\n\nAfter placing files, show the user a summary of what was created and where.\n\nTell the user: **Restart Cursor** for the skill and rules changes to take\neffect.\n", + "harnesses/shep_setup_prompt.txt": "You are setting up the Coasts skill for Shep in this project. Run all\ncommands from the project root.\n\n## Step 1: Check for Coast CLI\n\nRun this command:\n\n coast --version\n\nIf the `coast` command is not found, stop here and tell the user:\n\n \"The Coast CLI is not installed. Install it first: https://coasts.dev/docs/getting-started\"\n\nDo not continue until the CLI is available.\n\n## Step 2: Get the skill content\n\nRun this command:\n\n coast skills-prompt\n\nThe output has two parts:\n\n- **Coast Runtime rules** — everything from the start up to (but not including) the line that begins with `---`\n- **Coasts skill** — everything from the `---` frontmatter block onward (including the `---` lines and everything after them)\n\nSave both parts for the steps below.\n\n## Step 3: Place files\n\nShep wraps Claude Code, so use the Claude Code file layout. 
If a target file\nalready exists, append the Coast Runtime section rather than overwriting — but\ncheck first whether a `# Coast Runtime` section is already present and skip if\nso.\n\n- Append the Coast Runtime rules to `CLAUDE.md` at the project root\n- Write the Coasts skill to `.agents/skills/coasts/SKILL.md` (or\n `.claude/skills/coasts/SKILL.md` if the project already uses that layout)\n\n## Step 4: Update the Coastfile\n\nRead the `Coastfile` at the project root. Look at the `worktree_dir` field in the\n`[coast]` section.\n\nIf `~/.shep/repos/*/wt` is **not** already listed in `worktree_dir`:\n\n- If `worktree_dir` is a single string, convert it to an array and append\n `~/.shep/repos/*/wt`. For example, `worktree_dir = \".worktrees\"` becomes\n `worktree_dir = [\".worktrees\", \"~/.shep/repos/*/wt\"]`.\n- If `worktree_dir` is already an array, append `~/.shep/repos/*/wt` to it.\n- If `worktree_dir` is not present at all, add\n `worktree_dir = [\".worktrees\", \"~/.shep/repos/*/wt\"]`.\n\nThe `*` is a glob pattern that matches the per-repo hash directory. Coasts\nexpands it at runtime.\n\nThis is an external path, so if a Coast instance is already running for this\nproject it must be recreated with `coast run` for the new bind mount to take\neffect. Tell the user this.\n\n## Step 5: Confirm\n\nAfter placing files, show the user a summary of what was created and where.\n\nTell the user: **Quit and reopen your editor** for the skill and CLAUDE.md\nchanges to take effect.\n", + "harnesses/t3_code_setup_prompt.txt": "You are setting up the Coasts skill for T3 Code in this project. Run all\ncommands from the project root.\n\n## Step 1: Check for Coast CLI\n\nRun this command:\n\n coast --version\n\nIf the `coast` command is not found, stop here and tell the user:\n\n \"The Coast CLI is not installed. 
Install it first: https://coasts.dev/docs/getting-started\"\n\nDo not continue until the CLI is available.\n\n## Step 2: Get the skill content\n\nRun this command:\n\n coast skills-prompt\n\nThe output has two parts:\n\n- **Coast Runtime rules** — everything from the start up to (but not including) the line that begins with `---`\n- **Coasts skill** — everything from the `---` frontmatter block onward (including the `---` lines and everything after them)\n\nSave both parts for the steps below.\n\n## Step 3: Ask the user\n\nT3 Code supports multiple providers. Ask the user: which providers do you use\nin T3 Code?\n\n- **Codex** (OpenAI)\n- **Claude** (Anthropic)\n- **Both**\n- **Other** — if the user names a different provider, run\n `coast docs --path SKILLS_FOR_HOST_AGENTS.md` and\n `coast docs --path harnesses/README.md` to determine where that provider\n expects project instructions and skills, then follow the same pattern\n\n## Step 4: Place files\n\nT3 Code setup is always project-level. 
If a target file already exists, append\nthe Coast Runtime section rather than overwriting — but check first whether a\n`# Coast Runtime` section is already present and skip if so.\n\n**If the user selected Codex (or both):**\n- Append the Coast Runtime rules to `AGENTS.md` at the project root\n- Write the Coasts skill to `.agents/skills/coasts/SKILL.md` (project-level)\n- Also write the Coasts skill to `~/.codex/skills/coasts/SKILL.md` (global\n fallback — T3 Code may not scan project-level `.agents/skills/` yet)\n- Create `.agents/skills/coasts/agents/openai.yaml` with this content:\n\ninterface:\n display_name: \"Coasts\"\n short_description: \"Inspect, assign, and open Coasts for this repo\"\n default_prompt: \"Use this skill when the user wants help finding, assigning, or opening a Coast.\"\n\npolicy:\n allow_implicit_invocation: false\n\n**If the user selected Claude (or both):**\n- Append the Coast Runtime rules to `CLAUDE.md` at the project root\n- Write the Coasts skill to `.claude/skills/coasts/SKILL.md` (project-level)\n- Also write the Coasts skill to `~/.claude/skills/coasts/SKILL.md` (global\n fallback — T3 Code may not scan project-level `.claude/skills/` reliably)\n\n**Note:** T3 Code may not load project-level skills from `.agents/skills/` or\n`.claude/skills/` yet. The `AGENTS.md` and `CLAUDE.md` rules still apply on\nevery task regardless. The global `~/.codex/skills/coasts/` fallback ensures\nthe Codex provider can find the skill.\n\n## Step 5: Update the Coastfile\n\nRead the `Coastfile` at the project root. Look at the `worktree_dir` field in the\n`[coast]` section. Also read the `name` field — you will need it to construct the\nworktree path.\n\nThe T3 Code worktree directory is `~/.t3/worktrees/<project-name>`, where `<project-name>` is\nthe value of the `name` field in the Coastfile.\n\nIf that path is **not** already listed in `worktree_dir`:\n\n- If `worktree_dir` is a single string, convert it to an array and append the\n T3 Code path. 
For example, `worktree_dir = \".worktrees\"` becomes\n `worktree_dir = [\".worktrees\", \"~/.t3/worktrees/my-app\"]`.\n- If `worktree_dir` is already an array, append the T3 Code path to it.\n- If `worktree_dir` is not present at all, add\n `worktree_dir = [\".worktrees\", \"~/.t3/worktrees/<project-name>\"]` using the\n actual project name.\n\nThis is an external path, so if a Coast instance is already running for this\nproject it must be recreated with `coast run` for the new bind mount to take\neffect. Tell the user this.\n\n## Step 6: Confirm\n\nAfter placing files, show the user a summary of what was created and where.\nExplain that the global `~/.codex/skills/coasts/` copy is a workaround for\nT3 Code not loading project-level skills yet.\n\nTell the user: **Restart T3 Code** for the skill and rules changes to take\neffect.\n", "learn-coasts-videos/README.md": "# Coasts Video Course\n\nA short video course covering the core ideas behind Coasts. Each lesson is under three minutes. Watch them in order for the full picture, or jump to the topic you need.\n\n```youtube\nMBGKSKau4sU\n```\n\n## Lessons\n\n1. [Coasts](coasts.md) — what a Coast is and how the core model works.\n2. [Ports](ports.md) — how Coasts handle port isolation and parallel runtime access.\n3. [Assign](assign.md) — switching a running Coast between worktrees.\n4. [Checkout](checkout.md) — bringing a Coast onto your canonical ports for active use.\n5. [Volumes](volumes.md) — how Coasts deal with volumes and persistent service state.\n6. [Secrets](secrets.md) — managing secrets inside a Coast.\n7. [Getting Started](getting-started.md) — a hands-on walkthrough for trying Coasts on a real project.\n8. [Coast UI](coast-ui.md) — the Coastguard UI and the runtime information it exposes.\n", "learn-coasts-videos/assign.md": "# Assign\n\n```youtube\nLYCeequ54nk\n```\n\nAssign moves a running Coast from one worktree to another without tearing down the runtime. 
This video covers how assignment works and when you would use it to hand a Coast to a different branch.\n\nFor the full reference, see [Assign and Unassign](../concepts_and_terminology/ASSIGN.md).\n", "learn-coasts-videos/checkout.md": "# Checkout\n\n```youtube\nJRAXkM4U1UE\n```\n\nCheckout maps your project's canonical ports to a specific Coast instance. This video shows how to bring one Coast to the front so your browser, API clients, and test suites all hit the right environment without changing any port numbers.\n\nFor the full reference, see [Checkout](../concepts_and_terminology/CHECKOUT.md).\n", diff --git a/coast-guard/src/locales/en.json b/coast-guard/src/locales/en.json index 0fd21fc..2a52aa4 100644 --- a/coast-guard/src/locales/en.json +++ b/coast-guard/src/locales/en.json @@ -569,6 +569,7 @@ "docs.nav.harnessCursor": "Cursor", "docs.nav.harnessConductor": "Conductor", "docs.nav.harnessT3Code": "T3 Code", + "docs.nav.harnessShep": "Shep", "docs.nav.harnessMultipleHarnesses": "Multiple Harnesses", "docs.nav.recipes": "Recipes", "docs.nav.recipesFullstackMonorepo": "Full-Stack Monorepo" diff --git a/coast-guard/src/locales/es.json b/coast-guard/src/locales/es.json index 94d1b37..d92773a 100644 --- a/coast-guard/src/locales/es.json +++ b/coast-guard/src/locales/es.json @@ -518,6 +518,7 @@ "docs.nav.harnessCursor": "Cursor", "docs.nav.harnessConductor": "Conductor", "docs.nav.harnessT3Code": "T3 Code", + "docs.nav.harnessShep": "Shep", "docs.nav.harnessMultipleHarnesses": "Multiple Harnesses", "docs.nav.recipes": "Recetas", "docs.nav.recipesFullstackMonorepo": "Monorepo Full-Stack" diff --git a/coast-guard/src/locales/ja.json b/coast-guard/src/locales/ja.json index 6d061d3..d3616c5 100644 --- a/coast-guard/src/locales/ja.json +++ b/coast-guard/src/locales/ja.json @@ -533,6 +533,7 @@ "docs.nav.harnessCursor": "Cursor", "docs.nav.harnessConductor": "Conductor", "docs.nav.harnessT3Code": "T3 Code", + "docs.nav.harnessShep": "Shep", 
"docs.nav.harnessMultipleHarnesses": "Multiple Harnesses", "docs.nav.recipes": "レシピ", "docs.nav.recipesFullstackMonorepo": "フルスタック Monorepo" diff --git a/coast-guard/src/locales/ko.json b/coast-guard/src/locales/ko.json index ded0b55..7044728 100644 --- a/coast-guard/src/locales/ko.json +++ b/coast-guard/src/locales/ko.json @@ -533,6 +533,7 @@ "docs.nav.harnessCursor": "Cursor", "docs.nav.harnessConductor": "Conductor", "docs.nav.harnessT3Code": "T3 Code", + "docs.nav.harnessShep": "Shep", "docs.nav.harnessMultipleHarnesses": "Multiple Harnesses", "docs.nav.recipes": "레시피", "docs.nav.recipesFullstackMonorepo": "풀스택 Monorepo" diff --git a/coast-guard/src/locales/pt.json b/coast-guard/src/locales/pt.json index 4fd3a92..715868a 100644 --- a/coast-guard/src/locales/pt.json +++ b/coast-guard/src/locales/pt.json @@ -518,6 +518,7 @@ "docs.nav.harnessCursor": "Cursor", "docs.nav.harnessConductor": "Conductor", "docs.nav.harnessT3Code": "T3 Code", + "docs.nav.harnessShep": "Shep", "docs.nav.harnessMultipleHarnesses": "Multiple Harnesses", "docs.nav.recipes": "Receitas", "docs.nav.recipesFullstackMonorepo": "Monorepo Full-Stack" diff --git a/coast-guard/src/locales/ru.json b/coast-guard/src/locales/ru.json index a09d1ba..eaeb6bc 100644 --- a/coast-guard/src/locales/ru.json +++ b/coast-guard/src/locales/ru.json @@ -520,6 +520,7 @@ "docs.nav.harnessCursor": "Cursor", "docs.nav.harnessConductor": "Conductor", "docs.nav.harnessT3Code": "T3 Code", + "docs.nav.harnessShep": "Shep", "docs.nav.harnessMultipleHarnesses": "Multiple Harnesses", "docs.nav.recipes": "Рецепты", "docs.nav.recipesFullstackMonorepo": "Фулстек-монорепо" diff --git a/coast-guard/src/locales/zh.json b/coast-guard/src/locales/zh.json index 982fe28..76acbdc 100644 --- a/coast-guard/src/locales/zh.json +++ b/coast-guard/src/locales/zh.json @@ -533,6 +533,7 @@ "docs.nav.harnessCursor": "Cursor", "docs.nav.harnessConductor": "Conductor", "docs.nav.harnessT3Code": "T3 Code", + "docs.nav.harnessShep": "Shep", 
"docs.nav.harnessMultipleHarnesses": "Multiple Harnesses", "docs.nav.recipes": "配置示例", "docs.nav.recipesFullstackMonorepo": "全栈 Monorepo" diff --git a/docs/coastfiles/WORKTREE_DIR.md b/docs/coastfiles/WORKTREE_DIR.md index bef08c8..0fefd73 100644 --- a/docs/coastfiles/WORKTREE_DIR.md +++ b/docs/coastfiles/WORKTREE_DIR.md @@ -45,6 +45,27 @@ Paths starting with `/` are also treated as external and get their own bind moun worktree_dir = ["/shared/worktrees", ".worktrees"] ``` +### Glob patterns (external) + +External paths can contain glob metacharacters (`*`, `?`, `[...]`). Coast expands them at runtime against the host filesystem, creating a bind mount for each matching directory. + +```toml +worktree_dir = [".worktrees", "~/.shep/repos/*/wt"] +``` + +This is useful when a tool generates worktrees under a path component that varies per project (like a hash). The `*` matches any single directory name, so `~/.shep/repos/*/wt` matches `~/.shep/repos/a21f0cda9ab9d456/wt` and any other hash directory that contains a `wt` subdirectory. + +Supported glob syntax: + +- `*` — matches any sequence of characters within a single path component +- `?` — matches any single character +- `[abc]` — matches any character in the set +- `[!abc]` — matches any character not in the set + +Glob expansion happens everywhere worktree dirs are resolved: container creation, assign, start, lookup, and the git watcher. Matches are sorted for deterministic ordering. If a glob matches no directories, it is silently skipped. + +Like other external paths, the container must be recreated (`coast run`) after adding a glob pattern for the bind mount to take effect. 
+ ## How external directories work When Coast encounters an external worktree directory (tilde or absolute path), three things happen: @@ -100,12 +121,22 @@ name = "my-app" worktree_dir = [".worktrees", ".claude/worktrees"] ``` -### All three together +### Shep integration + +Shep creates worktrees at `~/.shep/repos/{hash}/wt/{branch-slug}` where the hash is per-repo. Use a glob pattern to match the hash directory: ```toml [coast] name = "my-app" -worktree_dir = [".worktrees", ".claude/worktrees", "~/.codex/worktrees"] +worktree_dir = [".worktrees", "~/.shep/repos/*/wt"] +``` + +### All harnesses together + +```toml +[coast] +name = "my-app" +worktree_dir = [".worktrees", ".claude/worktrees", "~/.codex/worktrees", "~/.shep/repos/*/wt"] ``` ## Live Coastfile reading diff --git a/docs/doc_ordering.txt b/docs/doc_ordering.txt index 32e2ae3..665342f 100644 --- a/docs/doc_ordering.txt +++ b/docs/doc_ordering.txt @@ -21,6 +21,7 @@ harnesses/CONDUCTOR.md harnesses/CLAUDE_CODE.md harnesses/CURSOR.md harnesses/T3_CODE.md +harnesses/SHEP.md harnesses/MULTIPLE_HARNESSES.md # Concepts and Terminology diff --git a/docs/harnesses/README.md b/docs/harnesses/README.md index cf9a245..91c3212 100644 --- a/docs/harnesses/README.md +++ b/docs/harnesses/README.md @@ -16,6 +16,7 @@ If one repo is used from multiple harnesses, see [Multiple Harnesses](MULTIPLE_H | Cursor | `~/.cursor/worktrees/` | `AGENTS.md` or `.cursor/rules/` | `.cursor/skills/` or `.agents/skills/` | `.cursor/commands/` | [Cursor](CURSOR.md) | | Conductor | `~/conductor/workspaces/` | `CLAUDE.md` | -- | -- | [Conductor](CONDUCTOR.md) | | T3 Code | `~/.t3/worktrees/` | `AGENTS.md` | `.agents/skills/` | -- | [T3 Code](T3_CODE.md) | +| Shep | `~/.shep/repos/*/wt` | `CLAUDE.md` | `.agents/skills/` or `.claude/skills/` | -- | [Shep](SHEP.md) | ## Skills vs Commands diff --git a/docs/harnesses/SHEP.md b/docs/harnesses/SHEP.md new file mode 100644 index 0000000..6cfc330 --- /dev/null +++ b/docs/harnesses/SHEP.md @@ -0,0 
+1,117 @@ +# Shep + +## Quick setup + +Requires the [Coast CLI](../GETTING_STARTED.md). Copy this prompt into your +agent's chat to set up Coasts automatically: + +```prompt-copy +shep_setup_prompt.txt +``` + +You can also get the skill content from the CLI: `coast skills-prompt`. + +After setup, **quit and reopen your editor** for the new skill and project +instructions to take effect. + +--- + +[Shep](https://shep-ai.github.io/cli/) creates worktrees at `~/.shep/repos/{hash}/wt/{branch-slug}`. The hash is the first 16 hex characters of the SHA-256 of the repository's absolute path, so it is deterministic per-repo but opaque. All worktrees for a given repo share the same hash and are differentiated by the `wt/{branch-slug}` subdirectory. + +From the Shep CLI, `shep feat show <feature>` prints the worktree path, or +`ls ~/.shep/repos` lists the per-repo hash directories. + +Because the hash varies per repo, Coasts uses a **glob pattern** to discover +shep worktrees without requiring the user to hard-code the hash. + +## Setup + +Add `~/.shep/repos/*/wt` to `worktree_dir`: + +```toml +[coast] +name = "my-app" +worktree_dir = [".worktrees", "~/.shep/repos/*/wt"] +``` + +The `*` matches the per-repo hash directory. At runtime Coasts expands the glob, +finds the matching directory (e.g. `~/.shep/repos/a21f0cda9ab9d456/wt`), and +bind-mounts it into the container. See +[Worktree Directories](../coastfiles/WORKTREE_DIR.md) for full details on glob +patterns. + +After changing `worktree_dir`, existing instances must be **recreated** for the bind mount to take effect: + +```bash +coast rm my-instance +coast build +coast run my-instance +``` + +The worktree listing updates immediately (Coasts reads the new Coastfile), but +assigning to a Shep worktree requires the bind mount inside the container. 
+ +## Where Coasts guidance goes + +Shep wraps Claude Code under the hood, so follow the Claude Code conventions: + +- put the short Coast Runtime rules in `CLAUDE.md` +- put the reusable `/coasts` workflow in `.claude/skills/coasts/SKILL.md` or + the shared `.agents/skills/coasts/SKILL.md` +- if this repo also uses other harnesses, see + [Multiple Harnesses](MULTIPLE_HARNESSES.md) and + [Skills for Host Agents](../SKILLS_FOR_HOST_AGENTS.md) + +## What Coasts does + +- **Run** -- `coast run <name>` creates a new Coast instance from the latest build. Use `coast run -w <worktree>` to create and assign a Shep worktree in one step. See [Run](../concepts_and_terminology/RUN.md). +- **Bind mount** -- At container creation, Coasts resolves the glob + `~/.shep/repos/*/wt` and mounts each matching directory into the container at + `/host-external-wt/{index}`. +- **Discovery** -- `git worktree list --porcelain` is repo-scoped, so only + worktrees belonging to the current project appear. +- **Naming** -- Shep worktrees use named branches, so they appear by branch + name in the Coasts UI and CLI (e.g., `feat-green-background`). +- **Assign** -- `coast assign` remounts `/workspace` from the external bind mount path. +- **Gitignored sync** -- Runs on the host filesystem with absolute paths, works without the bind mount. +- **Orphan detection** -- The git watcher scans external directories + recursively, filtering by `.git` gitdir pointers. If Shep deletes a + worktree, Coasts auto-unassigns the instance. 
+ +## Example + +```toml +[coast] +name = "my-app" +compose = "./docker-compose.yml" +worktree_dir = [".worktrees", "~/.shep/repos/*/wt"] +primary_port = "web" + +[ports] +web = 3000 +api = 8080 + +[assign] +default = "none" +[assign.services] +web = "hot" +api = "hot" +``` + +- `~/.shep/repos/*/wt` -- Shep (external, bind-mounted via glob expansion) + +## Shep path structure + +``` +~/.shep/repos/ + {sha256-of-repo-path-first-16-chars}/ + wt/ + {branch-slug}/ <-- git worktree + {branch-slug}/ +``` + +Key points: +- Same repo = same hash every time (deterministic, not random) +- Different repos = different hashes +- Path separators are normalized to `/` before hashing +- The hash can be found via `shep feat show <feature>` or `ls ~/.shep/repos` diff --git a/docs/harnesses/shep_setup_prompt.txt b/docs/harnesses/shep_setup_prompt.txt new file mode 100644 index 0000000..6f3431e --- /dev/null +++ b/docs/harnesses/shep_setup_prompt.txt @@ -0,0 +1,66 @@ +You are setting up the Coasts skill for Shep in this project. Run all +commands from the project root. + +## Step 1: Check for Coast CLI + +Run this command: + + coast --version + +If the `coast` command is not found, stop here and tell the user: + + "The Coast CLI is not installed. Install it first: https://coasts.dev/docs/getting-started" + +Do not continue until the CLI is available. + +## Step 2: Get the skill content + +Run this command: + + coast skills-prompt + +The output has two parts: + +- **Coast Runtime rules** — everything from the start up to (but not including) the line that begins with `---` +- **Coasts skill** — everything from the `---` frontmatter block onward (including the `---` lines and everything after them) + +Save both parts for the steps below. + +## Step 3: Place files + +Shep wraps Claude Code, so use the Claude Code file layout. 
If a target file +already exists, append the Coast Runtime section rather than overwriting — but +check first whether a `# Coast Runtime` section is already present and skip if +so. + +- Append the Coast Runtime rules to `CLAUDE.md` at the project root +- Write the Coasts skill to `.agents/skills/coasts/SKILL.md` (or + `.claude/skills/coasts/SKILL.md` if the project already uses that layout) + +## Step 4: Update the Coastfile + +Read the `Coastfile` at the project root. Look at the `worktree_dir` field in the +`[coast]` section. + +If `~/.shep/repos/*/wt` is **not** already listed in `worktree_dir`: + +- If `worktree_dir` is a single string, convert it to an array and append + `~/.shep/repos/*/wt`. For example, `worktree_dir = ".worktrees"` becomes + `worktree_dir = [".worktrees", "~/.shep/repos/*/wt"]`. +- If `worktree_dir` is already an array, append `~/.shep/repos/*/wt` to it. +- If `worktree_dir` is not present at all, add + `worktree_dir = [".worktrees", "~/.shep/repos/*/wt"]`. + +The `*` is a glob pattern that matches the per-repo hash directory. Coasts +expands it at runtime. + +This is an external path, so if a Coast instance is already running for this +project it must be recreated with `coast run` for the new bind mount to take +effect. Tell the user this. + +## Step 5: Confirm + +After placing files, show the user a summary of what was created and where. + +Tell the user: **Quit and reopen your editor** for the skill and CLAUDE.md +changes to take effect.